forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 1f93a7dfd1f8d5ff7a5c53246c7534fe2332d6f4
kernel/drivers/pci/controller/dwc/pcie-dw-rockchip.c
....@@ -14,6 +14,7 @@
1414 #include <linux/gpio.h>
1515 #include <linux/init.h>
1616 #include <linux/interrupt.h>
17
+#include <linux/iopoll.h>
1718 #include <linux/irq.h>
1819 #include <linux/irqchip/chained_irq.h>
1920 #include <linux/irqdomain.h>
....@@ -29,11 +30,13 @@
2930 #include <linux/of_pci.h>
3031 #include <linux/pci.h>
3132 #include <linux/phy/phy.h>
33
+#include <linux/phy/pcie.h>
3234 #include <linux/platform_device.h>
3335 #include <linux/poll.h>
3436 #include <linux/regmap.h>
3537 #include <linux/reset.h>
3638 #include <linux/resource.h>
39
+#include <linux/rfkill-wlan.h>
3740 #include <linux/signal.h>
3841 #include <linux/types.h>
3942 #include <linux/uaccess.h>
....@@ -49,13 +52,11 @@
4952 RK_PCIE_RC_TYPE,
5053 };
5154
52
-struct reset_bulk_data {
53
- const char *id;
54
- struct reset_control *rst;
55
-};
55
+#define RK_PCIE_DBG 0
5656
5757 #define PCIE_DMA_OFFSET 0x380000
5858
59
+#define PCIE_DMA_CTRL_OFF 0x8
5960 #define PCIE_DMA_WR_ENB 0xc
6061 #define PCIE_DMA_WR_CTRL_LO 0x200
6162 #define PCIE_DMA_WR_CTRL_HI 0x204
....@@ -99,6 +100,8 @@
99100
100101 #define PCIE_CAP_LINK_CONTROL2_LINK_STATUS 0xa0
101102
103
+#define PCIE_CLIENT_INTR_STATUS_MSG_RX 0x04
104
+#define PME_TO_ACK (BIT(9) | BIT(25))
102105 #define PCIE_CLIENT_INTR_STATUS_LEGACY 0x08
103106 #define PCIE_CLIENT_INTR_STATUS_MISC 0x10
104107 #define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
....@@ -106,12 +109,17 @@
106109 #define MASK_LEGACY_INT(x) (0x00110011 << x)
107110 #define UNMASK_LEGACY_INT(x) (0x00110000 << x)
108111 #define PCIE_CLIENT_INTR_MASK 0x24
112
+#define PCIE_CLIENT_POWER 0x2c
113
+#define READY_ENTER_L23 BIT(3)
114
+#define PCIE_CLIENT_MSG_GEN 0x34
115
+#define PME_TURN_OFF (BIT(4) | BIT(20))
109116 #define PCIE_CLIENT_GENERAL_DEBUG 0x104
110117 #define PCIE_CLIENT_HOT_RESET_CTRL 0x180
111118 #define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
112119 #define PCIE_CLIENT_LTSSM_STATUS 0x300
113120 #define SMLH_LINKUP BIT(16)
114121 #define RDLH_LINKUP BIT(17)
122
+#define PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN 0x154
115123 #define PCIE_CLIENT_DBG_FIFO_MODE_CON 0x310
116124 #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0 0x320
117125 #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1 0x324
....@@ -119,21 +127,30 @@
119127 #define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1 0x32c
120128 #define PCIE_CLIENT_DBG_FIFO_STATUS 0x350
121129 #define PCIE_CLIENT_DBG_TRANSITION_DATA 0xffff0000
122
-#define PCIE_CLIENT_DBF_EN 0xffff0003
123
-#define RK_PCIE_DBG 0
130
+#define PCIE_CLIENT_DBF_EN 0xffff0007
124131
125132 #define PCIE_PHY_LINKUP BIT(0)
126133 #define PCIE_DATA_LINKUP BIT(1)
127134
128
-#define PCIE_RESBAR_CTRL_REG0_REG 0x2a8
135
+#define PCIE_TYPE0_HDR_DBI2_OFFSET 0x100000
129136 #define PCIE_SB_BAR0_MASK_REG 0x100010
130137
131138 #define PCIE_PL_ORDER_RULE_CTRL_OFF 0x8B4
139
+#define RK_PCIE_L2_TMOUT_US 5000
140
+
141
+enum rk_pcie_ltssm_code {
142
+ S_L0 = 0x11,
143
+ S_L0S = 0x12,
144
+ S_L1_IDLE = 0x14,
145
+ S_L2_IDLE = 0x15,
146
+ S_MAX = 0x1f,
147
+};
132148
133149 struct rk_pcie {
134150 struct dw_pcie *pci;
135151 enum rk_pcie_device_mode mode;
136152 enum phy_mode phy_mode;
153
+ int phy_sub_mode;
137154 unsigned char bar_to_atu[6];
138155 phys_addr_t *outbound_addr;
139156 unsigned long *ib_window_map;
....@@ -144,9 +161,11 @@
144161 void __iomem *apb_base;
145162 struct phy *phy;
146163 struct clk_bulk_data *clks;
164
+ struct reset_control *rsts;
147165 unsigned int clk_cnt;
148
- struct reset_bulk_data *rsts;
149166 struct gpio_desc *rst_gpio;
167
+ u32 perst_inactive_ms;
168
+ struct gpio_desc *prsnt_gpio;
150169 phys_addr_t mem_start;
151170 size_t mem_size;
152171 struct pcie_port pp;
....@@ -154,21 +173,26 @@
154173 struct regmap *pmu_grf;
155174 struct dma_trx_obj *dma_obj;
156175 bool in_suspend;
176
+ bool skip_scan_in_resume;
157177 bool is_rk1808;
158178 bool is_signal_test;
159179 bool bifurcation;
180
+ bool supports_clkreq;
160181 struct regulator *vpcie3v3;
161182 struct irq_domain *irq_domain;
162183 raw_spinlock_t intx_lock;
184
+ u16 aspm;
185
+ u32 l1ss_ctl1;
163186 struct dentry *debugfs;
187
+ u32 msi_vector_num;
164188 };
165189
166190 struct rk_pcie_of_data {
167191 enum rk_pcie_device_mode mode;
192
+ u32 msi_vector_num;
168193 };
169194
170195 #define to_rk_pcie(x) dev_get_drvdata((x)->dev)
171
-static const struct dev_pm_ops rockchip_dw_pcie_pm_ops;
172196
173197 static int rk_pcie_read(void __iomem *addr, int size, u32 *val)
174198 {
....@@ -253,12 +277,154 @@
253277 return 0;
254278 }
255279
280
+static void rk_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
281
+{
282
+ int ret;
283
+
284
+ if (pci->ops->write_dbi) {
285
+ pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
286
+ return;
287
+ }
288
+
289
+ ret = dw_pcie_write(pci->atu_base + reg, 4, val);
290
+ if (ret)
291
+ dev_err(pci->dev, "Write ATU address failed\n");
292
+}
293
+
294
+static void rk_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
295
+ u32 val)
296
+{
297
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
298
+
299
+ rk_pcie_writel_atu(pci, offset + reg, val);
300
+}
301
+
302
+static u32 rk_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
303
+{
304
+ int ret;
305
+ u32 val;
306
+
307
+ if (pci->ops->read_dbi)
308
+ return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
309
+
310
+ ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
311
+ if (ret)
312
+ dev_err(pci->dev, "Read ATU address failed\n");
313
+
314
+ return val;
315
+}
316
+
317
+static u32 rk_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
318
+{
319
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
320
+
321
+ return rk_pcie_readl_atu(pci, offset + reg);
322
+}
323
+
324
+static int rk_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
325
+ int index, int bar, u64 cpu_addr,
326
+ enum dw_pcie_as_type as_type)
327
+{
328
+ int type;
329
+ u32 retries, val;
330
+
331
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
332
+ lower_32_bits(cpu_addr));
333
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
334
+ upper_32_bits(cpu_addr));
335
+
336
+ switch (as_type) {
337
+ case DW_PCIE_AS_MEM:
338
+ type = PCIE_ATU_TYPE_MEM;
339
+ break;
340
+ case DW_PCIE_AS_IO:
341
+ type = PCIE_ATU_TYPE_IO;
342
+ break;
343
+ default:
344
+ return -EINVAL;
345
+ }
346
+
347
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
348
+ PCIE_ATU_FUNC_NUM(func_no));
349
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
350
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
351
+ PCIE_ATU_ENABLE |
352
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
353
+
354
+ /*
355
+ * Make sure ATU enable takes effect before any subsequent config
356
+ * and I/O accesses.
357
+ */
358
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
359
+ val = rk_pcie_readl_ib_unroll(pci, index,
360
+ PCIE_ATU_UNR_REGION_CTRL2);
361
+ if (val & PCIE_ATU_ENABLE)
362
+ return 0;
363
+
364
+ mdelay(LINK_WAIT_IATU);
365
+ }
366
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
367
+
368
+ return -EBUSY;
369
+}
370
+
371
+
372
+static int rk_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
373
+ int bar, u64 cpu_addr,
374
+ enum dw_pcie_as_type as_type)
375
+{
376
+ int type;
377
+ u32 retries, val;
378
+
379
+ if (pci->iatu_unroll_enabled)
380
+ return rk_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
381
+ cpu_addr, as_type);
382
+
383
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
384
+ index);
385
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
386
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
387
+
388
+ switch (as_type) {
389
+ case DW_PCIE_AS_MEM:
390
+ type = PCIE_ATU_TYPE_MEM;
391
+ break;
392
+ case DW_PCIE_AS_IO:
393
+ type = PCIE_ATU_TYPE_IO;
394
+ break;
395
+ default:
396
+ return -EINVAL;
397
+ }
398
+
399
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
400
+ PCIE_ATU_FUNC_NUM(func_no));
401
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
402
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
403
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
404
+
405
+ /*
406
+ * Make sure ATU enable takes effect before any subsequent config
407
+ * and I/O accesses.
408
+ */
409
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
410
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
411
+ if (val & PCIE_ATU_ENABLE)
412
+ return 0;
413
+
414
+ mdelay(LINK_WAIT_IATU);
415
+ }
416
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
417
+
418
+ return -EBUSY;
419
+}
420
+
256421 static int rk_pcie_ep_inbound_atu(struct rk_pcie *rk_pcie,
257422 enum pci_barno bar, dma_addr_t cpu_addr,
258423 enum dw_pcie_as_type as_type)
259424 {
260425 int ret;
261426 u32 free_win;
427
+ u8 func_no = 0x0;
262428
263429 if (rk_pcie->in_suspend) {
264430 free_win = rk_pcie->bar_to_atu[bar];
....@@ -271,8 +437,8 @@
271437 }
272438 }
273439
274
- ret = dw_pcie_prog_inbound_atu(rk_pcie->pci, free_win, bar, cpu_addr,
275
- as_type);
440
+ ret = rk_pcie_prog_inbound_atu(rk_pcie->pci, func_no, free_win, bar,
441
+ cpu_addr, as_type);
276442 if (ret < 0) {
277443 dev_err(rk_pcie->pci->dev, "Failed to program IB window\n");
278444 return ret;
....@@ -285,6 +451,105 @@
285451 set_bit(free_win, rk_pcie->ib_window_map);
286452
287453 return 0;
454
+}
455
+
456
+static void rk_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
457
+ u32 val)
458
+{
459
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
460
+
461
+ rk_pcie_writel_atu(pci, offset + reg, val);
462
+}
463
+
464
+static u32 rk_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
465
+{
466
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
467
+
468
+ return rk_pcie_readl_atu(pci, offset + reg);
469
+}
470
+
471
+static void rk_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
472
+ int index, int type,
473
+ u64 cpu_addr, u64 pci_addr,
474
+ u32 size)
475
+{
476
+ u32 retries, val;
477
+ u64 limit_addr = cpu_addr + size - 1;
478
+
479
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
480
+ lower_32_bits(cpu_addr));
481
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
482
+ upper_32_bits(cpu_addr));
483
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
484
+ lower_32_bits(limit_addr));
485
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
486
+ upper_32_bits(limit_addr));
487
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
488
+ lower_32_bits(pci_addr));
489
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
490
+ upper_32_bits(pci_addr));
491
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
492
+ type | PCIE_ATU_FUNC_NUM(func_no));
493
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
494
+ PCIE_ATU_ENABLE);
495
+
496
+ /*
497
+ * Make sure ATU enable takes effect before any subsequent config
498
+ * and I/O accesses.
499
+ */
500
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
501
+ val = rk_pcie_readl_ob_unroll(pci, index,
502
+ PCIE_ATU_UNR_REGION_CTRL2);
503
+ if (val & PCIE_ATU_ENABLE)
504
+ return;
505
+
506
+ mdelay(LINK_WAIT_IATU);
507
+ }
508
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
509
+}
510
+
511
+static void rk_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
512
+ int type, u64 cpu_addr, u64 pci_addr, u32 size)
513
+{
514
+ u32 retries, val;
515
+
516
+ if (pci->ops->cpu_addr_fixup)
517
+ cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
518
+
519
+ if (pci->iatu_unroll_enabled) {
520
+ rk_pcie_prog_outbound_atu_unroll(pci, 0x0, index, type,
521
+ cpu_addr, pci_addr, size);
522
+ return;
523
+ }
524
+
525
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
526
+ PCIE_ATU_REGION_OUTBOUND | index);
527
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
528
+ lower_32_bits(cpu_addr));
529
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
530
+ upper_32_bits(cpu_addr));
531
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
532
+ lower_32_bits(cpu_addr + size - 1));
533
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
534
+ lower_32_bits(pci_addr));
535
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
536
+ upper_32_bits(pci_addr));
537
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
538
+ PCIE_ATU_FUNC_NUM(0x0));
539
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
540
+
541
+ /*
542
+ * Make sure ATU enable takes effect before any subsequent config
543
+ * and I/O accesses.
544
+ */
545
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
546
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
547
+ if (val & PCIE_ATU_ENABLE)
548
+ return;
549
+
550
+ mdelay(LINK_WAIT_IATU);
551
+ }
552
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
288553 }
289554
290555 static int rk_pcie_ep_outbound_atu(struct rk_pcie *rk_pcie,
....@@ -305,7 +570,7 @@
305570 }
306571 }
307572
308
- dw_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM,
573
+ rk_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM,
309574 phys_addr, pci_addr, size);
310575
311576 if (rk_pcie->in_suspend)
....@@ -362,6 +627,28 @@
362627 return 0;
363628 }
364629
630
#if defined(CONFIG_PCIEASPM)
/*
 * Clear the advertised ASPM L1.1/L1.2 and L1 PM Substates capability
 * bits in the port's L1SS extended capability so the PCI core never
 * enables L1 substates on this link.
 */
static void disable_aspm_l1ss(struct rk_pcie *rk_pcie)
{
	struct dw_pcie *pci = rk_pcie->pci;
	u32 l1ss_cap_ptr, reg;

	l1ss_cap_ptr = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	if (!l1ss_cap_ptr) {
		dev_err(pci->dev, "can't find l1ss cap\n");

		return;
	}

	reg = dw_pcie_readl_dbi(pci, l1ss_cap_ptr + PCI_L1SS_CAP);
	reg &= ~(PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2 |
		 PCI_L1SS_CAP_L1_PM_SS);
	dw_pcie_writel_dbi(pci, l1ss_cap_ptr + PCI_L1SS_CAP, reg);
}
#else
static inline void disable_aspm_l1ss(struct rk_pcie *rk_pcie) { }
#endif
651
+
365652 static inline void rk_pcie_set_mode(struct rk_pcie *rk_pcie)
366653 {
367654 switch (rk_pcie->mode) {
....@@ -369,6 +656,14 @@
369656 rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00000);
370657 break;
371658 case RK_PCIE_RC_TYPE:
659
+ if (rk_pcie->supports_clkreq) {
660
+ /* Application is ready to have reference clock removed */
661
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x00010001);
662
+ } else {
663
+ /* Pull down CLKREQ# to assert the connecting CLOCK_GEN OE */
664
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x30011000);
665
+ disable_aspm_l1ss(rk_pcie);
666
+ }
372667 rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00040);
373668 /*
374669 * Disable order rule for CPL can't pass halted P queue.
....@@ -405,13 +700,11 @@
405700
406701 if (rk_pcie->is_rk1808) {
407702 val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG);
408
- if ((val & (PCIE_PHY_LINKUP | PCIE_DATA_LINKUP)) == 0x3 &&
409
- ((val & GENMASK(15, 10)) >> 10) == 0x11)
703
+ if ((val & (PCIE_PHY_LINKUP | PCIE_DATA_LINKUP)) == 0x3)
410704 return 1;
411705 } else {
412706 val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS);
413
- if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == 0x30000 &&
414
- (val & GENMASK(5, 0)) == 0x11)
707
+ if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == 0x30000)
415708 return 1;
416709 }
417710
....@@ -420,7 +713,8 @@
420713
421714 static void rk_pcie_enable_debug(struct rk_pcie *rk_pcie)
422715 {
423
-#if RK_PCIE_DBG
716
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
717
+ return;
424718 if (rk_pcie->is_rk1808 == true)
425719 return;
426720
....@@ -434,7 +728,6 @@
434728 PCIE_CLIENT_DBG_TRANSITION_DATA);
435729 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_MODE_CON,
436730 PCIE_CLIENT_DBF_EN);
437
-#endif
438731 }
439732
440733 static void rk_pcie_debug_dump(struct rk_pcie *rk_pcie)
....@@ -453,13 +746,22 @@
453746
454747 static int rk_pcie_establish_link(struct dw_pcie *pci)
455748 {
456
- int retries;
749
+ int retries, power;
457750 struct rk_pcie *rk_pcie = to_rk_pcie(pci);
751
+ bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;
458752
459
- if (dw_pcie_link_up(pci)) {
753
+ /*
754
+ * For standard RC, even if the link has been setup by firmware,
755
+ * we still need to reset link as we need to remove all resource info
756
+ * from devices, for instance BAR, as it wasn't assigned by kernel.
757
+ */
758
+ if (dw_pcie_link_up(pci) && !std_rc) {
460759 dev_err(pci->dev, "link is already up\n");
461760 return 0;
462761 }
762
+
763
+	/* Reset the device */
764
+ gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);
463765
464766 rk_pcie_disable_ltssm(rk_pcie);
465767 rk_pcie_link_status_clear(rk_pcie);
....@@ -472,16 +774,38 @@
472774 rk_pcie_enable_ltssm(rk_pcie);
473775
474776 /*
777
+ * In resume routine, function devices' resume function must be late after
778
+ * controllers'. Some devices, such as Wi-Fi, need special IO setting before
779
+ * finishing training. So there must be timeout here. These kinds of devices
780
+ * need rescan devices by its driver when used. So no need to waste time waiting
781
+ * for training pass.
782
+ */
783
+ if (rk_pcie->in_suspend && rk_pcie->skip_scan_in_resume) {
784
+ rfkill_get_wifi_power_state(&power);
785
+ if (!power) {
786
+ gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
787
+ return 0;
788
+ }
789
+ }
790
+
791
+ /*
475792 * PCIe requires the refclk to be stable for 100µs prior to releasing
476793 * PERST and T_PVPERL (Power stable to PERST# inactive) should be a
477794 * minimum of 100ms. See table 2-4 in section 2.6.2 AC, the PCI Express
478795 * Card Electromechanical Specification 3.0. So 100ms in total is the min
479
- * requuirement here. We add a 1s for sake of hoping everthings work fine.
796
+	 * requirement here. We add 200ms by default in the hope that everything
797
+	 * works fine. If it doesn't, increase it via the rockchip,perst-inactive-ms DT property.
480798 */
481
- msleep(1000);
799
+ msleep(rk_pcie->perst_inactive_ms);
482800 gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
483801
484
- for (retries = 0; retries < 10; retries++) {
802
+ /*
803
+ * Add this 1ms delay because we observe link is always up stably after it and
804
+ * could help us save 20ms for scanning devices.
805
+ */
806
+ usleep_range(1000, 1100);
807
+
808
+ for (retries = 0; retries < 100; retries++) {
485809 if (dw_pcie_link_up(pci)) {
486810 /*
487811 * We may be here in case of L0 in Gen1. But if EP is capable
....@@ -490,17 +814,20 @@
490814 * that LTSSM max timeout is 24ms per period, we can wait a bit
491815 * more for Gen switch.
492816 */
493
- msleep(100);
494
- dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n",
495
- rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
496
- rk_pcie_debug_dump(rk_pcie);
497
- return 0;
817
+ msleep(50);
818
+ /* In case link drop after linkup, double check it */
819
+ if (dw_pcie_link_up(pci)) {
820
+ dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n",
821
+ rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
822
+ rk_pcie_debug_dump(rk_pcie);
823
+ return 0;
824
+ }
498825 }
499826
500827 dev_info_ratelimited(pci->dev, "PCIe Linking... LTSSM is 0x%x\n",
501828 rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
502829 rk_pcie_debug_dump(rk_pcie);
503
- msleep(1000);
830
+ msleep(20);
504831 }
505832
506833 dev_err(pci->dev, "PCIe Link Fail\n");
....@@ -508,20 +835,31 @@
508835 return rk_pcie->is_signal_test == true ? 0 : -EINVAL;
509836 }
510837
838
+static bool rk_pcie_udma_enabled(struct rk_pcie *rk_pcie)
839
+{
840
+ return dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
841
+ PCIE_DMA_CTRL_OFF);
842
+}
843
+
511844 static int rk_pcie_init_dma_trx(struct rk_pcie *rk_pcie)
512845 {
846
+ if (!rk_pcie_udma_enabled(rk_pcie))
847
+ return 0;
848
+
513849 rk_pcie->dma_obj = rk_pcie_dma_obj_probe(rk_pcie->pci->dev);
514850 if (IS_ERR(rk_pcie->dma_obj)) {
515851 dev_err(rk_pcie->pci->dev, "failed to prepare dma object\n");
516852 return -EINVAL;
853
+ } else if (rk_pcie->dma_obj) {
854
+ goto out;
517855 }
518856
519
- rk_pcie->dma_obj = pcie_dw_dmatest_register(rk_pcie->pci, true);
857
+ rk_pcie->dma_obj = pcie_dw_dmatest_register(rk_pcie->pci->dev, true);
520858 if (IS_ERR(rk_pcie->dma_obj)) {
521859 dev_err(rk_pcie->pci->dev, "failed to prepare dmatest\n");
522860 return -EINVAL;
523861 }
524
-
862
+out:
525863 /* Enable client write and read interrupt */
526864 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);
527865
....@@ -534,6 +872,76 @@
534872 return 0;
535873 }
536874
875
+static int rk_pci_find_resbar_capability(struct rk_pcie *rk_pcie)
876
+{
877
+ u32 header;
878
+ int ttl;
879
+ int start = 0;
880
+ int pos = PCI_CFG_SPACE_SIZE;
881
+ int cap = PCI_EXT_CAP_ID_REBAR;
882
+
883
+ /* minimum 8 bytes per capability */
884
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
885
+
886
+ header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
887
+
888
+ /*
889
+ * If we have no capabilities, this is indicated by cap ID,
890
+ * cap version and next pointer all being 0.
891
+ */
892
+ if (header == 0)
893
+ return 0;
894
+
895
+ while (ttl-- > 0) {
896
+ if (PCI_EXT_CAP_ID(header) == cap && pos != start)
897
+ return pos;
898
+
899
+ pos = PCI_EXT_CAP_NEXT(header);
900
+ if (pos < PCI_CFG_SPACE_SIZE)
901
+ break;
902
+
903
+ header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
904
+ if (!header)
905
+ break;
906
+ }
907
+
908
+ return 0;
909
+}
910
+
911
#ifdef MODULE
/*
 * Local copy of dw_pcie_write_dbi2(): the DWC core does not export this
 * symbol, so provide it here when the driver is built as a module.
 * Writes @val (of @size bytes) to @reg in the shadow (DBI2) space.
 */
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	/* Platform hook takes precedence over a direct write. */
	if (pci->ops && pci->ops->write_dbi2) {
		pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
		return;
	}

	if (dw_pcie_write(pci->dbi_base2 + reg, size, val))
		dev_err(pci->dev, "write DBI address failed\n");
}
#endif
926
+
927
+static int rk_pcie_ep_set_bar_flag(struct rk_pcie *rk_pcie, enum pci_barno barno, int flags)
928
+{
929
+ enum pci_barno bar = barno;
930
+ u32 reg;
931
+
932
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
933
+
934
+ /* Disabled the upper 32bits BAR to make a 64bits bar pair */
935
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
936
+ dw_pcie_writel_dbi2(rk_pcie->pci, reg + 4, 0);
937
+
938
+ dw_pcie_writel_dbi(rk_pcie->pci, reg, flags);
939
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
940
+ dw_pcie_writel_dbi(rk_pcie->pci, reg + 4, 0);
941
+
942
+ return 0;
943
+}
944
+
537945 static void rk_pcie_ep_setup(struct rk_pcie *rk_pcie)
538946 {
539947 int ret;
....@@ -541,6 +949,8 @@
541949 u32 lanes;
542950 struct device *dev = rk_pcie->pci->dev;
543951 struct device_node *np = dev->of_node;
952
+ int resbar_base;
953
+ int bar;
544954
545955 /* Enable client write and read interrupt */
546956 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);
....@@ -604,17 +1014,36 @@
6041014 /* Enable bus master and memory space */
6051015 dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_STATUS_COMMAND_REG, 0x6);
6061016
607
- /* Resize BAR0 to 4GB */
608
- /* bit13-8 set to 6 means 64MB */
609
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_RESBAR_CTRL_REG0_REG, 0x600);
1017
+ resbar_base = rk_pci_find_resbar_capability(rk_pcie);
1018
+ if (!resbar_base) {
1019
+ dev_warn(dev, "failed to find resbar_base\n");
1020
+ } else {
1021
+ /* Resize BAR0 to support 512GB, BAR1 to support 8M, BAR2~5 to support 64M */
1022
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4, 0xfffff0);
1023
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8, 0x13c0);
1024
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0xc, 0xfffff0);
1025
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x10, 0x3c0);
1026
+ for (bar = 2; bar < 6; bar++) {
1027
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
1028
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8 + bar * 0x8, 0x6c0);
1029
+ }
6101030
611
- /* Set shadow BAR0 according 64MB */
612
- val = rk_pcie->mem_size - 1;
613
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val);
1031
+ /* Set flags */
1032
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_0, PCI_BASE_ADDRESS_MEM_TYPE_32);
1033
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_1, PCI_BASE_ADDRESS_MEM_TYPE_32);
1034
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_2, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
1035
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_4, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
1036
+ }
6141037
615
- /* Set reserved memory address to BAR0 */
616
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_BAR0_REG,
617
- rk_pcie->mem_start);
1038
+ /* Device id and class id needed for request bar address */
1039
+ dw_pcie_writew_dbi(rk_pcie->pci, PCI_DEVICE_ID, 0x356a);
1040
+ dw_pcie_writew_dbi(rk_pcie->pci, PCI_CLASS_DEVICE, 0x0580);
1041
+
1042
+ /* Set shadow BAR0 */
1043
+ if (rk_pcie->is_rk1808) {
1044
+ val = rk_pcie->mem_size - 1;
1045
+ dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val);
1046
+ }
6181047 }
6191048
6201049 static int rk_pcie_ep_win_parse(struct rk_pcie *rk_pcie)
....@@ -675,6 +1104,14 @@
6751104 return 0;
6761105 }
6771106
1107
+static void rk_pcie_msi_set_num_vectors(struct pcie_port *pp)
1108
+{
1109
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1110
+ struct rk_pcie *rk_pcie = to_rk_pcie(pci);
1111
+
1112
+ pp->num_vectors = rk_pcie->msi_vector_num;
1113
+}
1114
+
6781115 static int rk_pcie_host_init(struct pcie_port *pp)
6791116 {
6801117 int ret;
....@@ -707,13 +1144,13 @@
7071144 if (pp->msi_irq < 0) {
7081145 dev_info(dev, "use outband MSI support");
7091146 rk_pcie_host_ops.msi_host_init = rk_pcie_msi_host_init;
1147
+ } else {
1148
+ dev_info(dev, "max MSI vector is %d\n", rk_pcie->msi_vector_num);
1149
+ rk_pcie_host_ops.set_num_vectors = rk_pcie_msi_set_num_vectors;
7101150 }
7111151 }
7121152
7131153 pp->ops = &rk_pcie_host_ops;
714
-
715
- if (device_property_read_bool(dev, "msi-map"))
716
- pp->msi_ext = 1;
7171154
7181155 ret = dw_pcie_host_init(pp);
7191156 if (ret) {
....@@ -753,6 +1190,8 @@
7531190 return ret;
7541191 }
7551192
1193
+ rk_pcie->pci->dbi_base2 = rk_pcie->pci->dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET;
1194
+ rk_pcie->pci->atu_base = rk_pcie->pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
7561195 rk_pcie->pci->iatu_unroll_enabled = rk_pcie_iatu_unroll_enabled(rk_pcie->pci);
7571196
7581197 ret = rk_pcie_ep_atu_init(rk_pcie);
....@@ -769,52 +1208,24 @@
7691208 return ret;
7701209 }
7711210
772
- return 0;
773
-}
1211
+ if (!rk_pcie_udma_enabled(rk_pcie))
1212
+ return 0;
7741213
775
-static void rk_pcie_clk_deinit(struct rk_pcie *rk_pcie)
776
-{
777
- clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks);
778
- clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
1214
+ return 0;
7791215 }
7801216
7811217 static int rk_pcie_clk_init(struct rk_pcie *rk_pcie)
7821218 {
7831219 struct device *dev = rk_pcie->pci->dev;
784
- struct property *prop;
785
- const char *name;
786
- int i = 0, ret, count;
1220
+ int ret;
7871221
788
- count = of_property_count_strings(dev->of_node, "clock-names");
789
- if (count < 1)
1222
+ rk_pcie->clk_cnt = devm_clk_bulk_get_all(dev, &rk_pcie->clks);
1223
+ if (rk_pcie->clk_cnt < 1)
7901224 return -ENODEV;
7911225
792
- rk_pcie->clks = devm_kcalloc(dev, count,
793
- sizeof(struct clk_bulk_data),
794
- GFP_KERNEL);
795
- if (!rk_pcie->clks)
796
- return -ENOMEM;
797
-
798
- rk_pcie->clk_cnt = count;
799
-
800
- of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
801
- rk_pcie->clks[i].id = name;
802
- if (!rk_pcie->clks[i].id)
803
- return -ENOMEM;
804
- i++;
805
- }
806
-
807
- ret = devm_clk_bulk_get(dev, count, rk_pcie->clks);
808
- if (ret)
809
- return ret;
810
-
811
- ret = clk_bulk_prepare(count, rk_pcie->clks);
812
- if (ret)
813
- return ret;
814
-
815
- ret = clk_bulk_enable(count, rk_pcie->clks);
1226
+ ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
8161227 if (ret) {
817
- clk_bulk_unprepare(count, rk_pcie->clks);
1228
+ dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret);
8181229 return ret;
8191230 }
8201231
....@@ -865,6 +1276,14 @@
8651276 return PTR_ERR(rk_pcie->rst_gpio);
8661277 }
8671278
1279
+ if (device_property_read_u32(&pdev->dev, "rockchip,perst-inactive-ms",
1280
+ &rk_pcie->perst_inactive_ms))
1281
+ rk_pcie->perst_inactive_ms = 200;
1282
+
1283
+ rk_pcie->prsnt_gpio = devm_gpiod_get_optional(&pdev->dev, "prsnt", GPIOD_IN);
1284
+ if (IS_ERR_OR_NULL(rk_pcie->prsnt_gpio))
1285
+ dev_info(&pdev->dev, "invalid prsnt-gpios property in node\n");
1286
+
8681287 return 0;
8691288 }
8701289
....@@ -873,7 +1292,7 @@
8731292 int ret;
8741293 struct device *dev = rk_pcie->pci->dev;
8751294
876
- rk_pcie->phy = devm_phy_get(dev, "pcie-phy");
1295
+ rk_pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
8771296 if (IS_ERR(rk_pcie->phy)) {
8781297 if (PTR_ERR(rk_pcie->phy) != -EPROBE_DEFER)
8791298 dev_info(dev, "missing phy\n");
....@@ -882,23 +1301,27 @@
8821301
8831302 switch (rk_pcie->mode) {
8841303 case RK_PCIE_RC_TYPE:
885
- rk_pcie->phy_mode = PHY_MODE_PCIE_RC;
1304
+ rk_pcie->phy_mode = PHY_MODE_PCIE; /* make no sense */
1305
+ rk_pcie->phy_sub_mode = PHY_MODE_PCIE_RC;
8861306 break;
8871307 case RK_PCIE_EP_TYPE:
888
- rk_pcie->phy_mode = PHY_MODE_PCIE_EP;
1308
+ rk_pcie->phy_mode = PHY_MODE_PCIE;
1309
+ rk_pcie->phy_sub_mode = PHY_MODE_PCIE_EP;
8891310 break;
8901311 }
8911312
892
- ret = phy_set_mode(rk_pcie->phy, rk_pcie->phy_mode);
1313
+ ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
1314
+ rk_pcie->phy_sub_mode);
8931315 if (ret) {
8941316 dev_err(dev, "fail to set phy to mode %s, err %d\n",
895
- (rk_pcie->phy_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
1317
+ (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
8961318 ret);
8971319 return ret;
8981320 }
8991321
9001322 if (rk_pcie->bifurcation)
901
- ret = phy_set_mode(rk_pcie->phy, PHY_MODE_PCIE_BIFURCATION);
1323
+ phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
1324
+ PHY_MODE_PCIE_BIFURCATION);
9021325
9031326 ret = phy_init(rk_pcie->phy);
9041327 if (ret < 0) {
....@@ -907,53 +1330,6 @@
9071330 }
9081331
9091332 phy_power_on(rk_pcie->phy);
910
-
911
- return 0;
912
-}
913
-
914
-static int rk_pcie_reset_control_release(struct rk_pcie *rk_pcie)
915
-{
916
- struct device *dev = rk_pcie->pci->dev;
917
- struct property *prop;
918
- const char *name;
919
- int ret, count, i = 0;
920
-
921
- count = of_property_count_strings(dev->of_node, "reset-names");
922
- if (count < 1)
923
- return -ENODEV;
924
-
925
- rk_pcie->rsts = devm_kcalloc(dev, count,
926
- sizeof(struct reset_bulk_data),
927
- GFP_KERNEL);
928
- if (!rk_pcie->rsts)
929
- return -ENOMEM;
930
-
931
- of_property_for_each_string(dev->of_node, "reset-names",
932
- prop, name) {
933
- rk_pcie->rsts[i].id = name;
934
- if (!rk_pcie->rsts[i].id)
935
- return -ENOMEM;
936
- i++;
937
- }
938
-
939
- for (i = 0; i < count; i++) {
940
- rk_pcie->rsts[i].rst = devm_reset_control_get_exclusive(dev,
941
- rk_pcie->rsts[i].id);
942
- if (IS_ERR_OR_NULL(rk_pcie->rsts[i].rst)) {
943
- dev_err(dev, "failed to get %s\n",
944
- rk_pcie->clks[i].id);
945
- return -PTR_ERR(rk_pcie->rsts[i].rst);
946
- }
947
- }
948
-
949
- for (i = 0; i < count; i++) {
950
- ret = reset_control_deassert(rk_pcie->rsts[i].rst);
951
- if (ret) {
952
- dev_err(dev, "failed to release %s\n",
953
- rk_pcie->rsts[i].id);
954
- return ret;
955
- }
956
- }
9571333
9581334 return 0;
9591335 }
....@@ -1149,6 +1525,11 @@
11491525 .mode = RK_PCIE_EP_TYPE,
11501526 };
11511527
1528
+static const struct rk_pcie_of_data rk3528_pcie_rc_of_data = {
1529
+ .mode = RK_PCIE_RC_TYPE,
1530
+ .msi_vector_num = 8,
1531
+};
1532
+
11521533 static const struct of_device_id rk_pcie_of_match[] = {
11531534 {
11541535 .compatible = "rockchip,rk1808-pcie",
....@@ -1159,11 +1540,27 @@
11591540 .data = &rk_pcie_ep_of_data,
11601541 },
11611542 {
1543
+ .compatible = "rockchip,rk3528-pcie",
1544
+ .data = &rk3528_pcie_rc_of_data,
1545
+ },
1546
+ {
1547
+ .compatible = "rockchip,rk3562-pcie",
1548
+ .data = &rk3528_pcie_rc_of_data,
1549
+ },
1550
+ {
11621551 .compatible = "rockchip,rk3568-pcie",
11631552 .data = &rk_pcie_rc_of_data,
11641553 },
11651554 {
11661555 .compatible = "rockchip,rk3568-pcie-ep",
1556
+ .data = &rk_pcie_ep_of_data,
1557
+ },
1558
+ {
1559
+ .compatible = "rockchip,rk3588-pcie",
1560
+ .data = &rk_pcie_rc_of_data,
1561
+ },
1562
+ {
1563
+ .compatible = "rockchip,rk3588-pcie-ep",
11671564 .data = &rk_pcie_ep_of_data,
11681565 },
11691566 {},
....@@ -1342,41 +1739,6 @@
13421739 return ret;
13431740 }
13441741
1345
-static int rk_pci_find_capability(struct rk_pcie *rk_pcie, int cap)
1346
-{
1347
- u32 header;
1348
- int ttl;
1349
- int start = 0;
1350
- int pos = PCI_CFG_SPACE_SIZE;
1351
-
1352
- /* minimum 8 bytes per capability */
1353
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
1354
-
1355
- header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
1356
-
1357
- /*
1358
- * If we have no capabilities, this is indicated by cap ID,
1359
- * cap version and next pointer all being 0.
1360
- */
1361
- if (header == 0)
1362
- return 0;
1363
-
1364
- while (ttl-- > 0) {
1365
- if (PCI_EXT_CAP_ID(header) == cap && pos != start)
1366
- return pos;
1367
-
1368
- pos = PCI_EXT_CAP_NEXT(header);
1369
- if (pos < PCI_CFG_SPACE_SIZE)
1370
- break;
1371
-
1372
- header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
1373
- if (!header)
1374
- break;
1375
- }
1376
-
1377
- return 0;
1378
-}
1379
-
13801742 #define RAS_DES_EVENT(ss, v) \
13811743 do { \
13821744 dw_pcie_writel_dbi(pcie->pci, cap_base + 8, v); \
....@@ -1387,8 +1749,27 @@
13871749 {
13881750 struct rk_pcie *pcie = s->private;
13891751 int cap_base;
1752
+ u32 val = rk_pcie_readl_apb(pcie, PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN);
1753
+ char *pm;
13901754
1391
- cap_base = rk_pci_find_capability(pcie, PCI_EXT_CAP_ID_VNDR);
1755
+ if (val & BIT(6))
1756
+ pm = "In training";
1757
+ else if (val & BIT(5))
1758
+ pm = "L1.2";
1759
+ else if (val & BIT(4))
1760
+ pm = "L1.1";
1761
+ else if (val & BIT(3))
1762
+ pm = "L1";
1763
+ else if (val & BIT(2))
1764
+ pm = "L0";
1765
+ else if (val & 0x3)
1766
+ pm = (val == 0x3) ? "L0s" : (val & BIT(1) ? "RX L0s" : "TX L0s");
1767
+ else
1768
+ pm = "Invalid";
1769
+
1770
+ seq_printf(s, "Common event signal status: 0x%s\n", pm);
1771
+
1772
+ cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
13921773 if (!cap_base) {
13931774 dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
13941775 return 0;
....@@ -1424,7 +1805,6 @@
14241805
14251806 return 0;
14261807 }
1427
-
14281808 static int rockchip_pcie_rasdes_open(struct inode *inode, struct file *file)
14291809 {
14301810 return single_open(file, rockchip_pcie_rasdes_show,
....@@ -1443,7 +1823,7 @@
14431823 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
14441824 return -EFAULT;
14451825
1446
- cap_base = rk_pci_find_capability(pcie, PCI_EXT_CAP_ID_VNDR);
1826
+ cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
14471827 if (!cap_base) {
14481828 dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
14491829 return 0;
....@@ -1528,29 +1908,35 @@
15281908 const struct rk_pcie_of_data *data;
15291909 enum rk_pcie_device_mode mode;
15301910 struct device_node *np = pdev->dev.of_node;
1531
- struct platform_driver *drv = to_platform_driver(dev->driver);
1532
- u32 val;
1911
+ u32 val = 0;
15331912 int irq;
15341913
15351914 match = of_match_device(rk_pcie_of_match, dev);
1536
- if (!match)
1537
- return -EINVAL;
1915
+ if (!match) {
1916
+ ret = -EINVAL;
1917
+ goto release_driver;
1918
+ }
15381919
15391920 data = (struct rk_pcie_of_data *)match->data;
15401921 mode = (enum rk_pcie_device_mode)data->mode;
15411922
15421923 rk_pcie = devm_kzalloc(dev, sizeof(*rk_pcie), GFP_KERNEL);
1543
- if (!rk_pcie)
1544
- return -ENOMEM;
1924
+ if (!rk_pcie) {
1925
+ ret = -ENOMEM;
1926
+ goto release_driver;
1927
+ }
15451928
15461929 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1547
- if (!pci)
1548
- return -ENOMEM;
1930
+ if (!pci) {
1931
+ ret = -ENOMEM;
1932
+ goto release_driver;
1933
+ }
15491934
15501935 pci->dev = dev;
15511936 pci->ops = &dw_pcie_ops;
15521937
15531938 rk_pcie->mode = mode;
1939
+ rk_pcie->msi_vector_num = data->msi_vector_num;
15541940 rk_pcie->pci = pci;
15551941
15561942 if (of_device_is_compatible(np, "rockchip,rk1808-pcie") ||
....@@ -1565,20 +1951,40 @@
15651951 ret = rk_pcie_resource_get(pdev, rk_pcie);
15661952 if (ret) {
15671953 dev_err(dev, "resource init failed\n");
1568
- return ret;
1954
+ goto release_driver;
15691955 }
15701956
1957
+ if (!IS_ERR_OR_NULL(rk_pcie->prsnt_gpio)) {
1958
+ if (!gpiod_get_value(rk_pcie->prsnt_gpio)) {
1959
+ ret = -ENODEV;
1960
+ goto release_driver;
1961
+ }
1962
+ }
1963
+
1964
+ rk_pcie->supports_clkreq = device_property_read_bool(dev, "supports-clkreq");
1965
+
1966
+retry_regulator:
15711967 /* DON'T MOVE ME: must be enable before phy init */
15721968 rk_pcie->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
15731969 if (IS_ERR(rk_pcie->vpcie3v3)) {
1574
- if (PTR_ERR(rk_pcie->vpcie3v3) != -ENODEV)
1575
- return PTR_ERR(rk_pcie->vpcie3v3);
1970
+ if (PTR_ERR(rk_pcie->vpcie3v3) != -ENODEV) {
1971
+ if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT)) {
1972
+ /* Deferred, but retried in threaded context for at most 10s */
1973
+ msleep(20);
1974
+ if (++val < 500)
1975
+ goto retry_regulator;
1976
+ }
1977
+
1978
+ ret = PTR_ERR(rk_pcie->vpcie3v3);
1979
+ goto release_driver;
1980
+ }
1981
+
15761982 dev_info(dev, "no vpcie3v3 regulator found\n");
15771983 }
15781984
15791985 ret = rk_pcie_enable_power(rk_pcie);
15801986 if (ret)
1581
- return ret;
1987
+ goto release_driver;
15821988
15831989 ret = rk_pcie_phy_init(rk_pcie);
15841990 if (ret) {
....@@ -1586,16 +1992,19 @@
15861992 goto disable_vpcie3v3;
15871993 }
15881994
1589
- ret = rk_pcie_reset_control_release(rk_pcie);
1590
- if (ret) {
1591
- dev_err(dev, "reset control init failed\n");
1592
- goto disable_vpcie3v3;
1995
+ rk_pcie->rsts = devm_reset_control_array_get_exclusive(dev);
1996
+ if (IS_ERR(rk_pcie->rsts)) {
1997
+ ret = PTR_ERR(rk_pcie->rsts);
1998
+ dev_err(dev, "failed to get reset lines\n");
1999
+ goto disable_phy;
15932000 }
2001
+
2002
+ reset_control_deassert(rk_pcie->rsts);
15942003
15952004 ret = rk_pcie_request_sys_irq(rk_pcie, pdev);
15962005 if (ret) {
15972006 dev_err(dev, "pcie irq init failed\n");
1598
- goto disable_vpcie3v3;
2007
+ goto disable_phy;
15992008 }
16002009
16012010 platform_set_drvdata(pdev, rk_pcie);
....@@ -1603,7 +2012,7 @@
16032012 ret = rk_pcie_clk_init(rk_pcie);
16042013 if (ret) {
16052014 dev_err(dev, "clock init failed\n");
1606
- goto disable_vpcie3v3;
2015
+ goto disable_phy;
16072016 }
16082017
16092018 dw_pcie_dbi_ro_wr_en(pci);
....@@ -1626,9 +2035,9 @@
16262035 /* Unmask all legacy interrupt from INTA~INTD */
16272036 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
16282037 UNMASK_ALL_LEGACY_INT);
2038
+ } else {
2039
+ dev_info(dev, "missing legacy IRQ resource\n");
16292040 }
1630
-
1631
- dev_info(dev, "missing legacy IRQ resource\n");
16322041 }
16332042
16342043 /* Set PCIe mode */
....@@ -1650,6 +2059,10 @@
16502059 rk_pcie->is_signal_test = true;
16512060 }
16522061
2062
+ /* Skip waiting for training to pass in system PM routine */
2063
+ if (device_property_read_bool(dev, "rockchip,skip-scan-in-resume"))
2064
+ rk_pcie->skip_scan_in_resume = true;
2065
+
16532066 switch (rk_pcie->mode) {
16542067 case RK_PCIE_RC_TYPE:
16552068 ret = rk_add_pcie_port(rk_pcie, pdev);
....@@ -1668,7 +2081,7 @@
16682081 ret = rk_pcie_init_dma_trx(rk_pcie);
16692082 if (ret) {
16702083 dev_err(dev, "failed to add dma extension\n");
1671
- return ret;
2084
+ goto remove_irq_domain;
16722085 }
16732086
16742087 if (rk_pcie->dma_obj) {
....@@ -1686,7 +2099,9 @@
16862099 dw_pcie_dbi_ro_wr_dis(pci);
16872100
16882101 device_init_wakeup(dev, true);
1689
- drv->driver.pm = &rockchip_dw_pcie_pm_ops;
2102
+
2103
+ /* Enable async system PM for multiports SoC */
2104
+ device_enable_async_suspend(dev);
16902105
16912106 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
16922107 ret = rockchip_pcie_debugfs_init(rk_pcie);
....@@ -1694,7 +2109,7 @@
16942109 dev_err(dev, "failed to setup debugfs: %d\n", ret);
16952110
16962111 /* Enable RASDES Error event by default */
1697
- val = rk_pci_find_capability(rk_pcie, PCI_EXT_CAP_ID_VNDR);
2112
+ val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_VNDR);
16982113 if (!val) {
16992114 dev_err(dev, "Not able to find RASDES CAP!\n");
17002115 return 0;
....@@ -1709,32 +2124,178 @@
17092124 remove_irq_domain:
17102125 if (rk_pcie->irq_domain)
17112126 irq_domain_remove(rk_pcie->irq_domain);
2127
+disable_phy:
2128
+ phy_power_off(rk_pcie->phy);
2129
+ phy_exit(rk_pcie->phy);
17122130 deinit_clk:
1713
- rk_pcie_clk_deinit(rk_pcie);
2131
+ clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
17142132 disable_vpcie3v3:
17152133 rk_pcie_disable_power(rk_pcie);
2134
+release_driver:
2135
+ if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT))
2136
+ device_release_driver(dev);
17162137
17172138 return ret;
17182139 }
17192140
17202141 static int rk_pcie_probe(struct platform_device *pdev)
17212142 {
1722
- struct task_struct *tsk;
2143
+ if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT)) {
2144
+ struct task_struct *tsk;
17232145
1724
- tsk = kthread_run(rk_pcie_really_probe, pdev, "rk-pcie");
1725
- if (IS_ERR(tsk)) {
1726
- dev_err(&pdev->dev, "start rk-pcie thread failed\n");
1727
- return PTR_ERR(tsk);
2146
+ tsk = kthread_run(rk_pcie_really_probe, pdev, "rk-pcie");
2147
+ if (IS_ERR(tsk)) {
2148
+ dev_err(&pdev->dev, "start rk-pcie thread failed\n");
2149
+ return PTR_ERR(tsk);
2150
+ }
2151
+
2152
+ return 0;
17282153 }
1729
- return 0;
2154
+
2155
+ return rk_pcie_really_probe(pdev);
17302156 }
2157
+
2158
+#ifdef CONFIG_PCIEASPM
2159
+static void rk_pcie_downstream_dev_to_d0(struct rk_pcie *rk_pcie, bool enable)
2160
+{
2161
+ struct pcie_port *pp = &rk_pcie->pci->pp;
2162
+ struct pci_bus *child, *root_bus = NULL;
2163
+ struct pci_dev *pdev, *bridge;
2164
+ u32 val;
2165
+
2166
+ list_for_each_entry(child, &pp->bridge->bus->children, node) {
2167
+ /* Locate the root port's secondary bus and its bridge device */
2168
+ if (child->parent == pp->bridge->bus) {
2169
+ root_bus = child;
2170
+ bridge = root_bus->self;
2171
+ break;
2172
+ }
2173
+ }
2174
+
2175
+ if (!root_bus) {
2176
+ dev_err(rk_pcie->pci->dev, "Failed to find downstream devices\n");
2177
+ return;
2178
+ }
2179
+
2180
+ /* Save and restore root bus ASPM */
2181
+ if (enable) {
2182
+ if (rk_pcie->l1ss_ctl1)
2183
+ dw_pcie_writel_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1, rk_pcie->l1ss_ctl1);
2184
+
2185
+ /* rk_pcie->aspm would be saved in advance when enable is false */
2186
+ dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, rk_pcie->aspm);
2187
+ } else {
2188
+ val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1);
2189
+ if (val & PCI_L1SS_CTL1_L1SS_MASK)
2190
+ rk_pcie->l1ss_ctl1 = val;
2191
+ else
2192
+ rk_pcie->l1ss_ctl1 = 0;
2193
+
2194
+ val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL);
2195
+ rk_pcie->aspm = val & PCI_EXP_LNKCTL_ASPMC;
2196
+ val &= ~(PCI_EXP_LNKCAP_ASPM_L1 | PCI_EXP_LNKCAP_ASPM_L0S);
2197
+ dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, val);
2198
+ }
2199
+
2200
+ list_for_each_entry(pdev, &root_bus->devices, bus_list) {
2201
+ if (PCI_SLOT(pdev->devfn) == 0) {
2202
+ if (pci_set_power_state(pdev, PCI_D0))
2203
+ dev_err(rk_pcie->pci->dev,
2204
+ "Failed to transition %s to D3hot state\n",
2205
+ dev_name(&pdev->dev));
2206
+ if (enable) {
2207
+ if (rk_pcie->l1ss_ctl1) {
2208
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, &val);
2209
+ val &= ~PCI_L1SS_CTL1_L1SS_MASK;
2210
+ val |= (rk_pcie->l1ss_ctl1 & PCI_L1SS_CTL1_L1SS_MASK);
2211
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, val);
2212
+ }
2213
+
2214
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
2215
+ PCI_EXP_LNKCTL_ASPMC, rk_pcie->aspm);
2216
+ } else {
2217
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
2218
+ }
2219
+ }
2220
+ }
2221
+}
2222
+#endif
17312223
17322224 static int __maybe_unused rockchip_dw_pcie_suspend(struct device *dev)
17332225 {
17342226 struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
1735
- int ret;
2227
+ int ret = 0, power;
2228
+ struct dw_pcie *pci = rk_pcie->pci;
2229
+ u32 status;
2230
+
2231
+ /*
2232
+ * This is as per PCI Express Base r5.0 r1.0 May 22-2019,
2233
+ * 5.2 Link State Power Management (Page #440).
2234
+ *
2235
+ * L2/L3 Ready entry negotiations happen while in the L0 state.
2236
+ * L2/L3 Ready are entered only after the negotiation completes.
2237
+ *
2238
+ * The following example sequence illustrates the multi-step Link state
2239
+ * transition process leading up to entering a system sleep state:
2240
+ * 1. System software directs all Functions of a Downstream component to D3Hot.
2241
+ * 2. The Downstream component then initiates the transition of the Link to L1
2242
+ * as required.
2243
+ * 3. System software then causes the Root Complex to broadcast the PME_Turn_Off
2244
+ * Message in preparation for removing the main power source.
2245
+ * 4. This Message causes the subject Link to transition back to L0 in order to
2246
+ * send it and to enable the Downstream component to respond with PME_TO_Ack.
2247
+ * 5. After sending the PME_TO_Ack, the Downstream component initiates the L2/L3
2248
+ * Ready transition protocol.
2249
+ */
2250
+
2251
+ /* 1. All sub-devices are in D3hot by PCIe stack */
2252
+ dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
17362253
17372254 rk_pcie_link_status_clear(rk_pcie);
2255
+
2256
+ /*
2257
+ * Wlan devices will be shutdown from function driver now, so doing L2 here
2258
+ * must fail. Skip L2 routine.
2259
+ */
2260
+ if (rk_pcie->skip_scan_in_resume) {
2261
+ rfkill_get_wifi_power_state(&power);
2262
+ if (!power)
2263
+ goto no_l2;
2264
+ }
2265
+
2266
+ /* 2. Broadcast PME_Turn_Off Message */
2267
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_MSG_GEN, PME_TURN_OFF);
2268
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_MSG_GEN,
2269
+ status, !(status & BIT(4)), 20, RK_PCIE_L2_TMOUT_US);
2270
+ if (ret) {
2271
+ dev_err(dev, "Failed to send PME_Turn_Off\n");
2272
+ goto no_l2;
2273
+ }
2274
+
2275
+ /* 3. Wait for PME_TO_Ack */
2276
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_INTR_STATUS_MSG_RX,
2277
+ status, status & BIT(9), 20, RK_PCIE_L2_TMOUT_US);
2278
+ if (ret) {
2279
+ dev_err(dev, "Failed to receive PME_TO_Ack\n");
2280
+ goto no_l2;
2281
+ }
2282
+
2283
+ /* 4. Clear PME_TO_Ack and Wait for ready to enter L23 message */
2284
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MSG_RX, PME_TO_ACK);
2285
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_POWER,
2286
+ status, status & READY_ENTER_L23, 20, RK_PCIE_L2_TMOUT_US);
2287
+ if (ret) {
2288
+ dev_err(dev, "Failed to ready to enter L23\n");
2289
+ goto no_l2;
2290
+ }
2291
+
2292
+ /* 5. Check we are in L2 */
2293
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_LTSSM_STATUS,
2294
+ status, ((status & S_MAX) == S_L2_IDLE), 20, RK_PCIE_L2_TMOUT_US);
2295
+ if (ret)
2296
+ dev_err(pci->dev, "Link isn't in L2 idle!\n");
2297
+
2298
+no_l2:
17382299 rk_pcie_disable_ltssm(rk_pcie);
17392300
17402301 /* make sure assert phy success */
....@@ -1743,15 +2304,12 @@
17432304 phy_power_off(rk_pcie->phy);
17442305 phy_exit(rk_pcie->phy);
17452306
1746
- clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks);
2307
+ clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
17472308
17482309 rk_pcie->in_suspend = true;
17492310
17502311 gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);
17512312 ret = rk_pcie_disable_power(rk_pcie);
1752
-
1753
- if (rk_pcie->pci->pp.msi_irq > 0)
1754
- dw_pcie_free_msi(&rk_pcie->pci->pp);
17552313
17562314 return ret;
17572315 }
....@@ -1762,20 +2320,25 @@
17622320 bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;
17632321 int ret;
17642322
2323
+ reset_control_assert(rk_pcie->rsts);
2324
+ udelay(10);
2325
+ reset_control_deassert(rk_pcie->rsts);
2326
+
17652327 ret = rk_pcie_enable_power(rk_pcie);
17662328 if (ret)
17672329 return ret;
17682330
1769
- ret = clk_bulk_enable(rk_pcie->clk_cnt, rk_pcie->clks);
2331
+ ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
17702332 if (ret) {
1771
- clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
2333
+ dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret);
17722334 return ret;
17732335 }
17742336
1775
- ret = phy_set_mode(rk_pcie->phy, rk_pcie->phy_mode);
2337
+ ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
2338
+ rk_pcie->phy_sub_mode);
17762339 if (ret) {
17772340 dev_err(dev, "fail to set phy to mode %s, err %d\n",
1778
- (rk_pcie->phy_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
2341
+ (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
17792342 ret);
17802343 return ret;
17812344 }
....@@ -1811,9 +2374,6 @@
18112374 goto err;
18122375 }
18132376
1814
- if (rk_pcie->pci->pp.msi_irq > 0)
1815
- dw_pcie_msi_init(&rk_pcie->pci->pp);
1816
-
18172377 if (std_rc)
18182378 goto std_rc_done;
18192379
....@@ -1836,6 +2396,9 @@
18362396 goto err;
18372397 }
18382398
2399
+ if (rk_pcie->pci->pp.msi_irq > 0)
2400
+ dw_pcie_msi_init(&rk_pcie->pci->pp);
2401
+
18392402 return 0;
18402403 err:
18412404 rk_pcie_disable_power(rk_pcie);
....@@ -1843,7 +2406,33 @@
18432406 return ret;
18442407 }
18452408
2409
+#ifdef CONFIG_PCIEASPM
2410
+static int rockchip_dw_pcie_prepare(struct device *dev)
2411
+{
2412
+ struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
2413
+
2414
+ dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
2415
+ rk_pcie_downstream_dev_to_d0(rk_pcie, false);
2416
+ dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
2417
+
2418
+ return 0;
2419
+}
2420
+
2421
+static void rockchip_dw_pcie_complete(struct device *dev)
2422
+{
2423
+ struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
2424
+
2425
+ dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
2426
+ rk_pcie_downstream_dev_to_d0(rk_pcie, true);
2427
+ dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
2428
+}
2429
+#endif
2430
+
18462431 static const struct dev_pm_ops rockchip_dw_pcie_pm_ops = {
2432
+#ifdef CONFIG_PCIEASPM
2433
+ .prepare = rockchip_dw_pcie_prepare,
2434
+ .complete = rockchip_dw_pcie_complete,
2435
+#endif
18472436 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_dw_pcie_suspend,
18482437 rockchip_dw_pcie_resume)
18492438 };
....@@ -1853,10 +2442,12 @@
18532442 .name = "rk-pcie",
18542443 .of_match_table = rk_pcie_of_match,
18552444 .suppress_bind_attrs = true,
2445
+ .pm = &rockchip_dw_pcie_pm_ops,
18562446 },
2447
+ .probe = rk_pcie_probe,
18572448 };
18582449
1859
-module_platform_driver_probe(rk_plat_pcie_driver, rk_pcie_probe);
2450
+module_platform_driver(rk_plat_pcie_driver);
18602451
18612452 MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com>");
18622453 MODULE_DESCRIPTION("RockChip PCIe Controller driver");