forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 ee930fffee469d076998274a2ca55e13dc1efb67
kernel/drivers/pci/controller/dwc/pcie-dw-rockchip.c
....@@ -8,12 +8,14 @@
88 * Author: Simon Xue <xxm@rock-chips.com>
99 */
1010
11
+#include <dt-bindings/phy/phy.h>
1112 #include <linux/clk.h>
1213 #include <linux/delay.h>
1314 #include <linux/fs.h>
1415 #include <linux/gpio.h>
1516 #include <linux/init.h>
1617 #include <linux/interrupt.h>
18
+#include <linux/iopoll.h>
1719 #include <linux/irq.h>
1820 #include <linux/irqchip/chained_irq.h>
1921 #include <linux/irqdomain.h>
....@@ -29,6 +31,7 @@
2931 #include <linux/of_pci.h>
3032 #include <linux/pci.h>
3133 #include <linux/phy/phy.h>
34
+#include <linux/phy/pcie.h>
3235 #include <linux/platform_device.h>
3336 #include <linux/poll.h>
3437 #include <linux/regmap.h>
....@@ -50,15 +53,11 @@
5053 RK_PCIE_RC_TYPE,
5154 };
5255
53
-struct reset_bulk_data {
54
- const char *id;
55
- struct reset_control *rst;
56
-};
57
-
5856 #define RK_PCIE_DBG 0
5957
6058 #define PCIE_DMA_OFFSET 0x380000
6159
60
+#define PCIE_DMA_CTRL_OFF 0x8
6261 #define PCIE_DMA_WR_ENB 0xc
6362 #define PCIE_DMA_WR_CTRL_LO 0x200
6463 #define PCIE_DMA_WR_CTRL_HI 0x204
....@@ -102,6 +101,8 @@
102101
103102 #define PCIE_CAP_LINK_CONTROL2_LINK_STATUS 0xa0
104103
104
+#define PCIE_CLIENT_INTR_STATUS_MSG_RX 0x04
105
+#define PME_TO_ACK (BIT(9) | BIT(25))
105106 #define PCIE_CLIENT_INTR_STATUS_LEGACY 0x08
106107 #define PCIE_CLIENT_INTR_STATUS_MISC 0x10
107108 #define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
....@@ -109,12 +110,21 @@
109110 #define MASK_LEGACY_INT(x) (0x00110011 << x)
110111 #define UNMASK_LEGACY_INT(x) (0x00110000 << x)
111112 #define PCIE_CLIENT_INTR_MASK 0x24
113
+#define PCIE_CLIENT_POWER 0x2c
114
+#define READY_ENTER_L23 BIT(3)
115
+#define PCIE_CLIENT_MSG_GEN 0x34
116
+#define PME_TURN_OFF (BIT(4) | BIT(20))
112117 #define PCIE_CLIENT_GENERAL_DEBUG 0x104
113118 #define PCIE_CLIENT_HOT_RESET_CTRL 0x180
119
+#define PCIE_LTSSM_APP_DLY1_EN BIT(0)
120
+#define PCIE_LTSSM_APP_DLY2_EN BIT(1)
121
+#define PCIE_LTSSM_APP_DLY1_DONE BIT(2)
122
+#define PCIE_LTSSM_APP_DLY2_DONE BIT(3)
114123 #define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
115124 #define PCIE_CLIENT_LTSSM_STATUS 0x300
116125 #define SMLH_LINKUP BIT(16)
117126 #define RDLH_LINKUP BIT(17)
127
+#define PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN 0x154
118128 #define PCIE_CLIENT_DBG_FIFO_MODE_CON 0x310
119129 #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0 0x320
120130 #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1 0x324
....@@ -122,20 +132,32 @@
122132 #define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1 0x32c
123133 #define PCIE_CLIENT_DBG_FIFO_STATUS 0x350
124134 #define PCIE_CLIENT_DBG_TRANSITION_DATA 0xffff0000
125
-#define PCIE_CLIENT_DBF_EN 0xffff0003
135
+#define PCIE_CLIENT_DBF_EN 0xffff0007
126136
127137 #define PCIE_PHY_LINKUP BIT(0)
128138 #define PCIE_DATA_LINKUP BIT(1)
129139
130
-#define PCIE_RESBAR_CTRL_REG0_REG 0x2a8
140
+#define PCIE_TYPE0_HDR_DBI2_OFFSET 0x100000
131141 #define PCIE_SB_BAR0_MASK_REG 0x100010
132142
133143 #define PCIE_PL_ORDER_RULE_CTRL_OFF 0x8B4
144
+#define RK_PCIE_L2_TMOUT_US 5000
145
+#define RK_PCIE_HOTRESET_TMOUT_US 10000
146
+#define RK_PCIE_ENUM_HW_RETRYIES 2
147
+
148
+enum rk_pcie_ltssm_code {
149
+ S_L0 = 0x11,
150
+ S_L0S = 0x12,
151
+ S_L1_IDLE = 0x14,
152
+ S_L2_IDLE = 0x15,
153
+ S_MAX = 0x1f,
154
+};
134155
135156 struct rk_pcie {
136157 struct dw_pcie *pci;
137158 enum rk_pcie_device_mode mode;
138159 enum phy_mode phy_mode;
160
+ int phy_sub_mode;
139161 unsigned char bar_to_atu[6];
140162 phys_addr_t *outbound_addr;
141163 unsigned long *ib_window_map;
....@@ -146,9 +168,10 @@
146168 void __iomem *apb_base;
147169 struct phy *phy;
148170 struct clk_bulk_data *clks;
171
+ struct reset_control *rsts;
149172 unsigned int clk_cnt;
150
- struct reset_bulk_data *rsts;
151173 struct gpio_desc *rst_gpio;
174
+ u32 perst_inactive_ms;
152175 struct gpio_desc *prsnt_gpio;
153176 phys_addr_t mem_start;
154177 size_t mem_size;
....@@ -157,15 +180,22 @@
157180 struct regmap *pmu_grf;
158181 struct dma_trx_obj *dma_obj;
159182 bool in_suspend;
160
- bool skip_scan_in_resume;
183
+ bool skip_scan_in_resume;
161184 bool is_rk1808;
162185 bool is_signal_test;
163186 bool bifurcation;
187
+ bool supports_clkreq;
164188 struct regulator *vpcie3v3;
165189 struct irq_domain *irq_domain;
166190 raw_spinlock_t intx_lock;
191
+ u16 aspm;
192
+ u32 l1ss_ctl1;
167193 struct dentry *debugfs;
168194 u32 msi_vector_num;
195
+ struct workqueue_struct *hot_rst_wq;
196
+ struct work_struct hot_rst_work;
197
+ u32 comp_prst[2];
198
+ u32 intx;
169199 };
170200
171201 struct rk_pcie_of_data {
....@@ -174,7 +204,8 @@
174204 };
175205
176206 #define to_rk_pcie(x) dev_get_drvdata((x)->dev)
177
-static const struct dev_pm_ops rockchip_dw_pcie_pm_ops;
207
+static int rk_pcie_disable_power(struct rk_pcie *rk_pcie);
208
+static int rk_pcie_enable_power(struct rk_pcie *rk_pcie);
178209
179210 static int rk_pcie_read(void __iomem *addr, int size, u32 *val)
180211 {
....@@ -259,12 +290,154 @@
259290 return 0;
260291 }
261292
293
+static void rk_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
294
+{
295
+ int ret;
296
+
297
+ if (pci->ops->write_dbi) {
298
+ pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
299
+ return;
300
+ }
301
+
302
+ ret = dw_pcie_write(pci->atu_base + reg, 4, val);
303
+ if (ret)
304
+ dev_err(pci->dev, "Write ATU address failed\n");
305
+}
306
+
307
+static void rk_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
308
+ u32 val)
309
+{
310
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
311
+
312
+ rk_pcie_writel_atu(pci, offset + reg, val);
313
+}
314
+
315
+static u32 rk_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
316
+{
317
+ int ret;
318
+ u32 val;
319
+
320
+ if (pci->ops->read_dbi)
321
+ return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
322
+
323
+ ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
324
+ if (ret)
325
+ dev_err(pci->dev, "Read ATU address failed\n");
326
+
327
+ return val;
328
+}
329
+
330
+static u32 rk_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
331
+{
332
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
333
+
334
+ return rk_pcie_readl_atu(pci, offset + reg);
335
+}
336
+
337
+static int rk_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
338
+ int index, int bar, u64 cpu_addr,
339
+ enum dw_pcie_as_type as_type)
340
+{
341
+ int type;
342
+ u32 retries, val;
343
+
344
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
345
+ lower_32_bits(cpu_addr));
346
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
347
+ upper_32_bits(cpu_addr));
348
+
349
+ switch (as_type) {
350
+ case DW_PCIE_AS_MEM:
351
+ type = PCIE_ATU_TYPE_MEM;
352
+ break;
353
+ case DW_PCIE_AS_IO:
354
+ type = PCIE_ATU_TYPE_IO;
355
+ break;
356
+ default:
357
+ return -EINVAL;
358
+ }
359
+
360
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
361
+ PCIE_ATU_FUNC_NUM(func_no));
362
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
363
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
364
+ PCIE_ATU_ENABLE |
365
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
366
+
367
+ /*
368
+ * Make sure ATU enable takes effect before any subsequent config
369
+ * and I/O accesses.
370
+ */
371
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
372
+ val = rk_pcie_readl_ib_unroll(pci, index,
373
+ PCIE_ATU_UNR_REGION_CTRL2);
374
+ if (val & PCIE_ATU_ENABLE)
375
+ return 0;
376
+
377
+ mdelay(LINK_WAIT_IATU);
378
+ }
379
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
380
+
381
+ return -EBUSY;
382
+}
383
+
384
+
385
+static int rk_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
386
+ int bar, u64 cpu_addr,
387
+ enum dw_pcie_as_type as_type)
388
+{
389
+ int type;
390
+ u32 retries, val;
391
+
392
+ if (pci->iatu_unroll_enabled)
393
+ return rk_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
394
+ cpu_addr, as_type);
395
+
396
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
397
+ index);
398
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
399
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
400
+
401
+ switch (as_type) {
402
+ case DW_PCIE_AS_MEM:
403
+ type = PCIE_ATU_TYPE_MEM;
404
+ break;
405
+ case DW_PCIE_AS_IO:
406
+ type = PCIE_ATU_TYPE_IO;
407
+ break;
408
+ default:
409
+ return -EINVAL;
410
+ }
411
+
412
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
413
+ PCIE_ATU_FUNC_NUM(func_no));
414
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
415
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
416
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
417
+
418
+ /*
419
+ * Make sure ATU enable takes effect before any subsequent config
420
+ * and I/O accesses.
421
+ */
422
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
423
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
424
+ if (val & PCIE_ATU_ENABLE)
425
+ return 0;
426
+
427
+ mdelay(LINK_WAIT_IATU);
428
+ }
429
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
430
+
431
+ return -EBUSY;
432
+}
433
+
262434 static int rk_pcie_ep_inbound_atu(struct rk_pcie *rk_pcie,
263435 enum pci_barno bar, dma_addr_t cpu_addr,
264436 enum dw_pcie_as_type as_type)
265437 {
266438 int ret;
267439 u32 free_win;
440
+ u8 func_no = 0x0;
268441
269442 if (rk_pcie->in_suspend) {
270443 free_win = rk_pcie->bar_to_atu[bar];
....@@ -277,8 +450,8 @@
277450 }
278451 }
279452
280
- ret = dw_pcie_prog_inbound_atu(rk_pcie->pci, free_win, bar, cpu_addr,
281
- as_type);
453
+ ret = rk_pcie_prog_inbound_atu(rk_pcie->pci, func_no, free_win, bar,
454
+ cpu_addr, as_type);
282455 if (ret < 0) {
283456 dev_err(rk_pcie->pci->dev, "Failed to program IB window\n");
284457 return ret;
....@@ -291,6 +464,105 @@
291464 set_bit(free_win, rk_pcie->ib_window_map);
292465
293466 return 0;
467
+}
468
+
469
+static void rk_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
470
+ u32 val)
471
+{
472
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
473
+
474
+ rk_pcie_writel_atu(pci, offset + reg, val);
475
+}
476
+
477
+static u32 rk_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
478
+{
479
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
480
+
481
+ return rk_pcie_readl_atu(pci, offset + reg);
482
+}
483
+
484
+static void rk_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
485
+ int index, int type,
486
+ u64 cpu_addr, u64 pci_addr,
487
+ u32 size)
488
+{
489
+ u32 retries, val;
490
+ u64 limit_addr = cpu_addr + size - 1;
491
+
492
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
493
+ lower_32_bits(cpu_addr));
494
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
495
+ upper_32_bits(cpu_addr));
496
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
497
+ lower_32_bits(limit_addr));
498
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
499
+ upper_32_bits(limit_addr));
500
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
501
+ lower_32_bits(pci_addr));
502
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
503
+ upper_32_bits(pci_addr));
504
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
505
+ type | PCIE_ATU_FUNC_NUM(func_no));
506
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
507
+ PCIE_ATU_ENABLE);
508
+
509
+ /*
510
+ * Make sure ATU enable takes effect before any subsequent config
511
+ * and I/O accesses.
512
+ */
513
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
514
+ val = rk_pcie_readl_ob_unroll(pci, index,
515
+ PCIE_ATU_UNR_REGION_CTRL2);
516
+ if (val & PCIE_ATU_ENABLE)
517
+ return;
518
+
519
+ mdelay(LINK_WAIT_IATU);
520
+ }
521
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
522
+}
523
+
524
+static void rk_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
525
+ int type, u64 cpu_addr, u64 pci_addr, u32 size)
526
+{
527
+ u32 retries, val;
528
+
529
+ if (pci->ops->cpu_addr_fixup)
530
+ cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
531
+
532
+ if (pci->iatu_unroll_enabled) {
533
+ rk_pcie_prog_outbound_atu_unroll(pci, 0x0, index, type,
534
+ cpu_addr, pci_addr, size);
535
+ return;
536
+ }
537
+
538
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
539
+ PCIE_ATU_REGION_OUTBOUND | index);
540
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
541
+ lower_32_bits(cpu_addr));
542
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
543
+ upper_32_bits(cpu_addr));
544
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
545
+ lower_32_bits(cpu_addr + size - 1));
546
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
547
+ lower_32_bits(pci_addr));
548
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
549
+ upper_32_bits(pci_addr));
550
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
551
+ PCIE_ATU_FUNC_NUM(0x0));
552
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
553
+
554
+ /*
555
+ * Make sure ATU enable takes effect before any subsequent config
556
+ * and I/O accesses.
557
+ */
558
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
559
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
560
+ if (val & PCIE_ATU_ENABLE)
561
+ return;
562
+
563
+ mdelay(LINK_WAIT_IATU);
564
+ }
565
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
294566 }
295567
296568 static int rk_pcie_ep_outbound_atu(struct rk_pcie *rk_pcie,
....@@ -311,7 +583,7 @@
311583 }
312584 }
313585
314
- dw_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM,
586
+ rk_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM,
315587 phys_addr, pci_addr, size);
316588
317589 if (rk_pcie->in_suspend)
....@@ -368,6 +640,28 @@
368640 return 0;
369641 }
370642
643
+#if defined(CONFIG_PCIEASPM)
644
+static void disable_aspm_l1ss(struct rk_pcie *rk_pcie)
645
+{
646
+ u32 val, cfg_link_cap_l1sub;
647
+
648
+ val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_L1SS);
649
+ if (!val) {
650
+ dev_err(rk_pcie->pci->dev, "can't find l1ss cap\n");
651
+
652
+ return;
653
+ }
654
+
655
+ cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
656
+
657
+ val = dw_pcie_readl_dbi(rk_pcie->pci, cfg_link_cap_l1sub);
658
+ val &= ~(PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2 | PCI_L1SS_CAP_L1_PM_SS);
659
+ dw_pcie_writel_dbi(rk_pcie->pci, cfg_link_cap_l1sub, val);
660
+}
661
+#else
662
+static inline void disable_aspm_l1ss(struct rk_pcie *rk_pcie) { return; }
663
+#endif
664
+
371665 static inline void rk_pcie_set_mode(struct rk_pcie *rk_pcie)
372666 {
373667 switch (rk_pcie->mode) {
....@@ -375,6 +669,14 @@
375669 rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00000);
376670 break;
377671 case RK_PCIE_RC_TYPE:
672
+ if (rk_pcie->supports_clkreq) {
673
+ /* Application is ready to have reference clock removed */
674
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x00010001);
675
+ } else {
676
+ /* Pull down CLKREQ# to assert the connecting CLOCK_GEN OE */
677
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x30011000);
678
+ disable_aspm_l1ss(rk_pcie);
679
+ }
378680 rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00040);
379681 /*
380682 * Disable order rule for CPL can't pass halted P queue.
....@@ -404,28 +706,10 @@
404706 rk_pcie_writel_apb(rk_pcie, 0x0, 0xC000C);
405707 }
406708
407
-static int rk_pcie_link_up(struct dw_pcie *pci)
408
-{
409
- struct rk_pcie *rk_pcie = to_rk_pcie(pci);
410
- u32 val;
411
-
412
- if (rk_pcie->is_rk1808) {
413
- val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG);
414
- if ((val & (PCIE_PHY_LINKUP | PCIE_DATA_LINKUP)) == 0x3 &&
415
- ((val & GENMASK(15, 10)) >> 10) == 0x11)
416
- return 1;
417
- } else {
418
- val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS);
419
- if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == 0x30000)
420
- return 1;
421
- }
422
-
423
- return 0;
424
-}
425
-
426709 static void rk_pcie_enable_debug(struct rk_pcie *rk_pcie)
427710 {
428
-#if RK_PCIE_DBG
711
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
712
+ return;
429713 if (rk_pcie->is_rk1808 == true)
430714 return;
431715
....@@ -439,7 +723,6 @@
439723 PCIE_CLIENT_DBG_TRANSITION_DATA);
440724 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_MODE_CON,
441725 PCIE_CLIENT_DBF_EN);
442
-#endif
443726 }
444727
445728 static void rk_pcie_debug_dump(struct rk_pcie *rk_pcie)
....@@ -461,6 +744,8 @@
461744 int retries, power;
462745 struct rk_pcie *rk_pcie = to_rk_pcie(pci);
463746 bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;
747
+ int hw_retries = 0;
748
+ u32 ltssm;
464749
465750 /*
466751 * For standard RC, even if the link has been setup by firmware,
....@@ -472,80 +757,113 @@
472757 return 0;
473758 }
474759
475
- rk_pcie_disable_ltssm(rk_pcie);
476
- rk_pcie_link_status_clear(rk_pcie);
477
- rk_pcie_enable_debug(rk_pcie);
760
+ for (hw_retries = 0; hw_retries < RK_PCIE_ENUM_HW_RETRYIES; hw_retries++) {
761
+ /* Rest the device */
762
+ gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);
478763
479
- /* Enable client reset or link down interrupt */
480
- rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0x40000);
764
+ rk_pcie_disable_ltssm(rk_pcie);
765
+ rk_pcie_link_status_clear(rk_pcie);
766
+ rk_pcie_enable_debug(rk_pcie);
481767
482
- /* Enable LTSSM */
483
- rk_pcie_enable_ltssm(rk_pcie);
768
+ /* Enable client reset or link down interrupt */
769
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0x40000);
484770
485
- /*
486
- * In resume routine, function devices' resume function must be late after
487
- * controllers'. Some devices, such as Wi-Fi, need special IO setting before
488
- * finishing training. So there must be timeout here. These kinds of devices
489
- * need rescan devices by its driver when used. So no need to waste time waiting
490
- * for training pass.
491
- */
492
- if (rk_pcie->in_suspend && rk_pcie->skip_scan_in_resume) {
493
- rfkill_get_wifi_power_state(&power);
494
- if (!power) {
495
- gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
496
- return 0;
771
+ /* Enable LTSSM */
772
+ rk_pcie_enable_ltssm(rk_pcie);
773
+
774
+ /*
775
+ * In resume routine, function devices' resume function must be late after
776
+ * controllers'. Some devices, such as Wi-Fi, need special IO setting before
777
+ * finishing training. So there must be timeout here. These kinds of devices
778
+ * need rescan devices by its driver when used. So no need to waste time waiting
779
+ * for training pass.
780
+ */
781
+ if (rk_pcie->in_suspend && rk_pcie->skip_scan_in_resume) {
782
+ rfkill_get_wifi_power_state(&power);
783
+ if (!power) {
784
+ gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
785
+ return 0;
786
+ }
497787 }
498
- }
499788
500
- /*
501
- * PCIe requires the refclk to be stable for 100µs prior to releasing
502
- * PERST and T_PVPERL (Power stable to PERST# inactive) should be a
503
- * minimum of 100ms. See table 2-4 in section 2.6.2 AC, the PCI Express
504
- * Card Electromechanical Specification 3.0. So 100ms in total is the min
505
- * requuirement here. We add a 1s for sake of hoping everthings work fine.
506
- */
507
- msleep(1000);
508
- gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
789
+ /*
790
+ * PCIe requires the refclk to be stable for 100µs prior to releasing
791
+ * PERST and T_PVPERL (Power stable to PERST# inactive) should be a
792
+ * minimum of 100ms. See table 2-4 in section 2.6.2 AC, the PCI Express
793
+ * Card Electromechanical Specification 3.0. So 100ms in total is the min
794
+ * requuirement here. We add a 200ms by default for sake of hoping everthings
795
+ * work fine. If it doesn't, please add more in DT node by add rockchip,perst-inactive-ms.
796
+ */
797
+ msleep(rk_pcie->perst_inactive_ms);
798
+ gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
509799
510
- /*
511
- * Add this 1ms delay because we observe link is always up stably after it and
512
- * could help us save 20ms for scanning devices.
513
- */
514
- usleep_range(1000, 1100);
800
+ /*
801
+ * Add this 1ms delay because we observe link is always up stably after it and
802
+ * could help us save 20ms for scanning devices.
803
+ */
804
+ usleep_range(1000, 1100);
515805
516
- for (retries = 0; retries < 10; retries++) {
517
- if (dw_pcie_link_up(pci)) {
518
- /*
519
- * We may be here in case of L0 in Gen1. But if EP is capable
520
- * of Gen2 or Gen3, Gen switch may happen just in this time, but
521
- * we keep on accessing devices in unstable link status. Given
522
- * that LTSSM max timeout is 24ms per period, we can wait a bit
523
- * more for Gen switch.
524
- */
525
- msleep(100);
526
- dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n",
527
- rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
806
+ for (retries = 0; retries < 100; retries++) {
807
+ if (dw_pcie_link_up(pci)) {
808
+ /*
809
+ * We may be here in case of L0 in Gen1. But if EP is capable
810
+ * of Gen2 or Gen3, Gen switch may happen just in this time, but
811
+ * we keep on accessing devices in unstable link status. Given
812
+ * that LTSSM max timeout is 24ms per period, we can wait a bit
813
+ * more for Gen switch.
814
+ */
815
+ msleep(50);
816
+ /* In case link drop after linkup, double check it */
817
+ if (dw_pcie_link_up(pci)) {
818
+ dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n",
819
+ rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
820
+ rk_pcie_debug_dump(rk_pcie);
821
+ return 0;
822
+ }
823
+ }
824
+
825
+ dev_info_ratelimited(pci->dev, "PCIe Linking... LTSSM is 0x%x\n",
826
+ rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
528827 rk_pcie_debug_dump(rk_pcie);
529
- return 0;
828
+ msleep(20);
530829 }
531830
532
- dev_info_ratelimited(pci->dev, "PCIe Linking... LTSSM is 0x%x\n",
533
- rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
534
- rk_pcie_debug_dump(rk_pcie);
535
- msleep(1000);
831
+ /*
832
+ * In response to the situation where PCIe peripherals cannot be
833
+ * enumerated due tosignal abnormalities, reset PERST# and reset
834
+ * the peripheral power supply, then restart the enumeration.
835
+ */
836
+ ltssm = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS);
837
+ dev_err(pci->dev, "PCIe Link Fail, LTSSM is 0x%x, hw_retries=%d\n", ltssm, hw_retries);
838
+ if (ltssm >= 3 && !rk_pcie->is_signal_test) {
839
+ rk_pcie_disable_power(rk_pcie);
840
+ msleep(1000);
841
+ rk_pcie_enable_power(rk_pcie);
842
+ } else {
843
+ break;
844
+ }
536845 }
537
-
538
- dev_err(pci->dev, "PCIe Link Fail\n");
539846
540847 return rk_pcie->is_signal_test == true ? 0 : -EINVAL;
541848 }
542849
850
+static bool rk_pcie_udma_enabled(struct rk_pcie *rk_pcie)
851
+{
852
+ return dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
853
+ PCIE_DMA_CTRL_OFF);
854
+}
855
+
543856 static int rk_pcie_init_dma_trx(struct rk_pcie *rk_pcie)
544857 {
858
+ if (!rk_pcie_udma_enabled(rk_pcie))
859
+ return 0;
860
+
545861 rk_pcie->dma_obj = rk_pcie_dma_obj_probe(rk_pcie->pci->dev);
546862 if (IS_ERR(rk_pcie->dma_obj)) {
547863 dev_err(rk_pcie->pci->dev, "failed to prepare dma object\n");
548864 return -EINVAL;
865
+ } else if (rk_pcie->dma_obj) {
866
+ goto out;
549867 }
550868
551869 rk_pcie->dma_obj = pcie_dw_dmatest_register(rk_pcie->pci->dev, true);
....@@ -553,7 +871,7 @@
553871 dev_err(rk_pcie->pci->dev, "failed to prepare dmatest\n");
554872 return -EINVAL;
555873 }
556
-
874
+out:
557875 /* Enable client write and read interrupt */
558876 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);
559877
....@@ -566,6 +884,76 @@
566884 return 0;
567885 }
568886
887
+static int rk_pci_find_resbar_capability(struct rk_pcie *rk_pcie)
888
+{
889
+ u32 header;
890
+ int ttl;
891
+ int start = 0;
892
+ int pos = PCI_CFG_SPACE_SIZE;
893
+ int cap = PCI_EXT_CAP_ID_REBAR;
894
+
895
+ /* minimum 8 bytes per capability */
896
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
897
+
898
+ header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
899
+
900
+ /*
901
+ * If we have no capabilities, this is indicated by cap ID,
902
+ * cap version and next pointer all being 0.
903
+ */
904
+ if (header == 0)
905
+ return 0;
906
+
907
+ while (ttl-- > 0) {
908
+ if (PCI_EXT_CAP_ID(header) == cap && pos != start)
909
+ return pos;
910
+
911
+ pos = PCI_EXT_CAP_NEXT(header);
912
+ if (pos < PCI_CFG_SPACE_SIZE)
913
+ break;
914
+
915
+ header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
916
+ if (!header)
917
+ break;
918
+ }
919
+
920
+ return 0;
921
+}
922
+
923
+#ifdef MODULE
924
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
925
+{
926
+ int ret;
927
+
928
+ if (pci->ops && pci->ops->write_dbi2) {
929
+ pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
930
+ return;
931
+ }
932
+
933
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
934
+ if (ret)
935
+ dev_err(pci->dev, "write DBI address failed\n");
936
+}
937
+#endif
938
+
939
+static int rk_pcie_ep_set_bar_flag(struct rk_pcie *rk_pcie, enum pci_barno barno, int flags)
940
+{
941
+ enum pci_barno bar = barno;
942
+ u32 reg;
943
+
944
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
945
+
946
+ /* Disabled the upper 32bits BAR to make a 64bits bar pair */
947
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
948
+ dw_pcie_writel_dbi2(rk_pcie->pci, reg + 4, 0);
949
+
950
+ dw_pcie_writel_dbi(rk_pcie->pci, reg, flags);
951
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
952
+ dw_pcie_writel_dbi(rk_pcie->pci, reg + 4, 0);
953
+
954
+ return 0;
955
+}
956
+
569957 static void rk_pcie_ep_setup(struct rk_pcie *rk_pcie)
570958 {
571959 int ret;
....@@ -573,6 +961,8 @@
573961 u32 lanes;
574962 struct device *dev = rk_pcie->pci->dev;
575963 struct device_node *np = dev->of_node;
964
+ int resbar_base;
965
+ int bar;
576966
577967 /* Enable client write and read interrupt */
578968 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);
....@@ -636,17 +1026,36 @@
6361026 /* Enable bus master and memory space */
6371027 dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_STATUS_COMMAND_REG, 0x6);
6381028
639
- /* Resize BAR0 to 4GB */
640
- /* bit13-8 set to 6 means 64MB */
641
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_RESBAR_CTRL_REG0_REG, 0x600);
1029
+ resbar_base = rk_pci_find_resbar_capability(rk_pcie);
1030
+ if (!resbar_base) {
1031
+ dev_warn(dev, "failed to find resbar_base\n");
1032
+ } else {
1033
+ /* Resize BAR0 to support 512GB, BAR1 to support 8M, BAR2~5 to support 64M */
1034
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4, 0xfffff0);
1035
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8, 0x13c0);
1036
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0xc, 0xfffff0);
1037
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x10, 0x3c0);
1038
+ for (bar = 2; bar < 6; bar++) {
1039
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
1040
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8 + bar * 0x8, 0x6c0);
1041
+ }
6421042
643
- /* Set shadow BAR0 according 64MB */
644
- val = rk_pcie->mem_size - 1;
645
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val);
1043
+ /* Set flags */
1044
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_0, PCI_BASE_ADDRESS_MEM_TYPE_32);
1045
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_1, PCI_BASE_ADDRESS_MEM_TYPE_32);
1046
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_2, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
1047
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_4, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
1048
+ }
6461049
647
- /* Set reserved memory address to BAR0 */
648
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_BAR0_REG,
649
- rk_pcie->mem_start);
1050
+ /* Device id and class id needed for request bar address */
1051
+ dw_pcie_writew_dbi(rk_pcie->pci, PCI_DEVICE_ID, 0x356a);
1052
+ dw_pcie_writew_dbi(rk_pcie->pci, PCI_CLASS_DEVICE, 0x0580);
1053
+
1054
+ /* Set shadow BAR0 */
1055
+ if (rk_pcie->is_rk1808) {
1056
+ val = rk_pcie->mem_size - 1;
1057
+ dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val);
1058
+ }
6501059 }
6511060
6521061 static int rk_pcie_ep_win_parse(struct rk_pcie *rk_pcie)
....@@ -722,6 +1131,10 @@
7221131
7231132 dw_pcie_setup_rc(pp);
7241133
1134
+ /* Disable BAR0 BAR1 */
1135
+ dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, 0x0);
1136
+ dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_1, 0x0);
1137
+
7251138 ret = rk_pcie_establish_link(pci);
7261139
7271140 if (pp->msi_irq > 0)
....@@ -754,9 +1167,6 @@
7541167 }
7551168
7561169 pp->ops = &rk_pcie_host_ops;
757
-
758
- if (device_property_read_bool(dev, "msi-map"))
759
- pp->msi_ext = 1;
7601170
7611171 ret = dw_pcie_host_init(pp);
7621172 if (ret) {
....@@ -796,6 +1206,7 @@
7961206 return ret;
7971207 }
7981208
1209
+ rk_pcie->pci->atu_base = rk_pcie->pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
7991210 rk_pcie->pci->iatu_unroll_enabled = rk_pcie_iatu_unroll_enabled(rk_pcie->pci);
8001211
8011212 ret = rk_pcie_ep_atu_init(rk_pcie);
....@@ -812,52 +1223,24 @@
8121223 return ret;
8131224 }
8141225
815
- return 0;
816
-}
1226
+ if (!rk_pcie_udma_enabled(rk_pcie))
1227
+ return 0;
8171228
818
-static void rk_pcie_clk_deinit(struct rk_pcie *rk_pcie)
819
-{
820
- clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks);
821
- clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
1229
+ return 0;
8221230 }
8231231
8241232 static int rk_pcie_clk_init(struct rk_pcie *rk_pcie)
8251233 {
8261234 struct device *dev = rk_pcie->pci->dev;
827
- struct property *prop;
828
- const char *name;
829
- int i = 0, ret, count;
1235
+ int ret;
8301236
831
- count = of_property_count_strings(dev->of_node, "clock-names");
832
- if (count < 1)
1237
+ rk_pcie->clk_cnt = devm_clk_bulk_get_all(dev, &rk_pcie->clks);
1238
+ if (rk_pcie->clk_cnt < 1)
8331239 return -ENODEV;
8341240
835
- rk_pcie->clks = devm_kcalloc(dev, count,
836
- sizeof(struct clk_bulk_data),
837
- GFP_KERNEL);
838
- if (!rk_pcie->clks)
839
- return -ENOMEM;
840
-
841
- rk_pcie->clk_cnt = count;
842
-
843
- of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
844
- rk_pcie->clks[i].id = name;
845
- if (!rk_pcie->clks[i].id)
846
- return -ENOMEM;
847
- i++;
848
- }
849
-
850
- ret = devm_clk_bulk_get(dev, count, rk_pcie->clks);
851
- if (ret)
852
- return ret;
853
-
854
- ret = clk_bulk_prepare(count, rk_pcie->clks);
855
- if (ret)
856
- return ret;
857
-
858
- ret = clk_bulk_enable(count, rk_pcie->clks);
1241
+ ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
8591242 if (ret) {
860
- clk_bulk_unprepare(count, rk_pcie->clks);
1243
+ dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret);
8611244 return ret;
8621245 }
8631246
....@@ -882,6 +1265,7 @@
8821265 return PTR_ERR(rk_pcie->dbi_base);
8831266
8841267 rk_pcie->pci->dbi_base = rk_pcie->dbi_base;
1268
+ rk_pcie->pci->dbi_base2 = rk_pcie->pci->dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET;
8851269
8861270 apb_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
8871271 "pcie-apb");
....@@ -908,6 +1292,10 @@
9081292 return PTR_ERR(rk_pcie->rst_gpio);
9091293 }
9101294
1295
+ if (device_property_read_u32(&pdev->dev, "rockchip,perst-inactive-ms",
1296
+ &rk_pcie->perst_inactive_ms))
1297
+ rk_pcie->perst_inactive_ms = 200;
1298
+
9111299 rk_pcie->prsnt_gpio = devm_gpiod_get_optional(&pdev->dev, "prsnt", GPIOD_IN);
9121300 if (IS_ERR_OR_NULL(rk_pcie->prsnt_gpio))
9131301 dev_info(&pdev->dev, "invalid prsnt-gpios property in node\n");
....@@ -920,7 +1308,7 @@
9201308 int ret;
9211309 struct device *dev = rk_pcie->pci->dev;
9221310
923
- rk_pcie->phy = devm_phy_get(dev, "pcie-phy");
1311
+ rk_pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
9241312 if (IS_ERR(rk_pcie->phy)) {
9251313 if (PTR_ERR(rk_pcie->phy) != -EPROBE_DEFER)
9261314 dev_info(dev, "missing phy\n");
....@@ -929,23 +1317,27 @@
9291317
9301318 switch (rk_pcie->mode) {
9311319 case RK_PCIE_RC_TYPE:
932
- rk_pcie->phy_mode = PHY_MODE_PCIE_RC;
1320
+ rk_pcie->phy_mode = PHY_MODE_PCIE; /* make no sense */
1321
+ rk_pcie->phy_sub_mode = PHY_MODE_PCIE_RC;
9331322 break;
9341323 case RK_PCIE_EP_TYPE:
935
- rk_pcie->phy_mode = PHY_MODE_PCIE_EP;
1324
+ rk_pcie->phy_mode = PHY_MODE_PCIE;
1325
+ rk_pcie->phy_sub_mode = PHY_MODE_PCIE_EP;
9361326 break;
9371327 }
9381328
939
- ret = phy_set_mode(rk_pcie->phy, rk_pcie->phy_mode);
1329
+ ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
1330
+ rk_pcie->phy_sub_mode);
9401331 if (ret) {
9411332 dev_err(dev, "fail to set phy to mode %s, err %d\n",
942
- (rk_pcie->phy_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
1333
+ (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
9431334 ret);
9441335 return ret;
9451336 }
9461337
9471338 if (rk_pcie->bifurcation)
948
- ret = phy_set_mode(rk_pcie->phy, PHY_MODE_PCIE_BIFURCATION);
1339
+ phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
1340
+ PHY_MODE_PCIE_BIFURCATION);
9491341
9501342 ret = phy_init(rk_pcie->phy);
9511343 if (ret < 0) {
....@@ -954,53 +1346,6 @@
9541346 }
9551347
9561348 phy_power_on(rk_pcie->phy);
957
-
958
- return 0;
959
-}
960
-
961
-static int rk_pcie_reset_control_release(struct rk_pcie *rk_pcie)
962
-{
963
- struct device *dev = rk_pcie->pci->dev;
964
- struct property *prop;
965
- const char *name;
966
- int ret, count, i = 0;
967
-
968
- count = of_property_count_strings(dev->of_node, "reset-names");
969
- if (count < 1)
970
- return -ENODEV;
971
-
972
- rk_pcie->rsts = devm_kcalloc(dev, count,
973
- sizeof(struct reset_bulk_data),
974
- GFP_KERNEL);
975
- if (!rk_pcie->rsts)
976
- return -ENOMEM;
977
-
978
- of_property_for_each_string(dev->of_node, "reset-names",
979
- prop, name) {
980
- rk_pcie->rsts[i].id = name;
981
- if (!rk_pcie->rsts[i].id)
982
- return -ENOMEM;
983
- i++;
984
- }
985
-
986
- for (i = 0; i < count; i++) {
987
- rk_pcie->rsts[i].rst = devm_reset_control_get_exclusive(dev,
988
- rk_pcie->rsts[i].id);
989
- if (IS_ERR_OR_NULL(rk_pcie->rsts[i].rst)) {
990
- dev_err(dev, "failed to get %s\n",
991
- rk_pcie->clks[i].id);
992
- return -PTR_ERR(rk_pcie->rsts[i].rst);
993
- }
994
- }
995
-
996
- for (i = 0; i < count; i++) {
997
- ret = reset_control_deassert(rk_pcie->rsts[i].rst);
998
- if (ret) {
999
- dev_err(dev, "failed to release %s\n",
1000
- rk_pcie->rsts[i].id);
1001
- return ret;
1002
- }
1003
- }
10041349
10051350 return 0;
10061351 }
....@@ -1105,13 +1450,37 @@
11051450 table->start.chnl = table->chn;
11061451 }
11071452
1453
/*
 * Deferred handler queued on hot_rst_wq from rk_pcie_sys_irq_handler when
 * BIT(2) of PCIE_CLIENT_INTR_STATUS_MISC fires (presumably the hot-reset
 * event — confirm against the TRM).  A hot reset clears the root port's
 * command register, so re-program it here; when the LTSSM DLY2 hand-shake
 * is enabled, also wait for the link to quiesce before acknowledging.
 */
static void rk_pcie_hot_rst_work(struct work_struct *work)
{
	struct rk_pcie *rk_pcie = container_of(work, struct rk_pcie, hot_rst_work);
	u32 val, status;
	int ret;

	/* Setup command register: keep status bits, re-enable IO/MEM/BM/SERR */
	val = dw_pcie_readl_dbi(rk_pcie->pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(rk_pcie->pci, PCI_COMMAND, val);

	if (rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL) & PCIE_LTSSM_APP_DLY2_EN) {
		/* Poll LTSSM until the low 6 state bits read 0 (detect-quiet) */
		ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_LTSSM_STATUS,
			 status, ((status & 0x3F) == 0), 100, RK_PCIE_HOTRESET_TMOUT_US);
		if (ret)
			dev_err(rk_pcie->pci->dev, "wait for detect quiet failed!\n");

		/*
		 * Ack the DLY2 stage; the value repeated in the upper 16 bits
		 * is presumably the Rockchip write-enable mask — confirm.
		 */
		rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL,
			(PCIE_LTSSM_APP_DLY2_DONE) | ((PCIE_LTSSM_APP_DLY2_DONE) << 16));
	}
}
1476
+
11081477 static irqreturn_t rk_pcie_sys_irq_handler(int irq, void *arg)
11091478 {
11101479 struct rk_pcie *rk_pcie = arg;
11111480 u32 chn;
11121481 union int_status status;
11131482 union int_clear clears;
1114
- u32 reg, val;
1483
+ u32 reg;
11151484
11161485 status.asdword = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
11171486 PCIE_DMA_WR_INT_STATUS);
....@@ -1152,14 +1521,8 @@
11521521 }
11531522
11541523 reg = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC);
1155
- if (reg & BIT(2)) {
1156
- /* Setup command register */
1157
- val = dw_pcie_readl_dbi(rk_pcie->pci, PCI_COMMAND);
1158
- val &= 0xffff0000;
1159
- val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
1160
- PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
1161
- dw_pcie_writel_dbi(rk_pcie->pci, PCI_COMMAND, val);
1162
- }
1524
+ if (reg & BIT(2))
1525
+ queue_work(rk_pcie->hot_rst_wq, &rk_pcie->hot_rst_work);
11631526
11641527 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC, reg);
11651528
....@@ -1215,11 +1578,23 @@
12151578 .data = &rk3528_pcie_rc_of_data,
12161579 },
12171580 {
1581
+ .compatible = "rockchip,rk3562-pcie",
1582
+ .data = &rk3528_pcie_rc_of_data,
1583
+ },
1584
+ {
12181585 .compatible = "rockchip,rk3568-pcie",
12191586 .data = &rk_pcie_rc_of_data,
12201587 },
12211588 {
12221589 .compatible = "rockchip,rk3568-pcie-ep",
1590
+ .data = &rk_pcie_ep_of_data,
1591
+ },
1592
+ {
1593
+ .compatible = "rockchip,rk3588-pcie",
1594
+ .data = &rk_pcie_rc_of_data,
1595
+ },
1596
+ {
1597
+ .compatible = "rockchip,rk3588-pcie-ep",
12231598 .data = &rk_pcie_ep_of_data,
12241599 },
12251600 {},
....@@ -1229,7 +1604,6 @@
12291604
/*
 * DWC core callbacks.  NOTE(review): the .link_up callback was dropped in
 * this revision, so the DWC core's default link-up detection is used —
 * confirm that is intended for all supported SoCs.
 */
static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = rk_pcie_establish_link,
};
12341608
12351609 static int rk1808_pcie_fixup(struct rk_pcie *rk_pcie, struct device_node *np)
....@@ -1271,7 +1645,8 @@
12711645
12721646 /* LTSSM EN ctrl mode */
12731647 val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL);
1274
- val |= PCIE_LTSSM_ENABLE_ENHANCE | (PCIE_LTSSM_ENABLE_ENHANCE << 16);
1648
+ val |= (PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN)
1649
+ | ((PCIE_LTSSM_APP_DLY2_EN | PCIE_LTSSM_ENABLE_ENHANCE) << 16);
12751650 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL, val);
12761651 }
12771652
....@@ -1309,7 +1684,7 @@
13091684 static int rk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
13101685 irq_hw_number_t hwirq)
13111686 {
1312
- irq_set_chip_and_handler(irq, &rk_pcie_legacy_irq_chip, handle_simple_irq);
1687
+ irq_set_chip_and_handler(irq, &rk_pcie_legacy_irq_chip, handle_level_irq);
13131688 irq_set_chip_data(irq, domain->host_data);
13141689
13151690 return 0;
....@@ -1398,41 +1773,6 @@
13981773 return ret;
13991774 }
14001775
1401
-static int rk_pci_find_capability(struct rk_pcie *rk_pcie, int cap)
1402
-{
1403
- u32 header;
1404
- int ttl;
1405
- int start = 0;
1406
- int pos = PCI_CFG_SPACE_SIZE;
1407
-
1408
- /* minimum 8 bytes per capability */
1409
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
1410
-
1411
- header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
1412
-
1413
- /*
1414
- * If we have no capabilities, this is indicated by cap ID,
1415
- * cap version and next pointer all being 0.
1416
- */
1417
- if (header == 0)
1418
- return 0;
1419
-
1420
- while (ttl-- > 0) {
1421
- if (PCI_EXT_CAP_ID(header) == cap && pos != start)
1422
- return pos;
1423
-
1424
- pos = PCI_EXT_CAP_NEXT(header);
1425
- if (pos < PCI_CFG_SPACE_SIZE)
1426
- break;
1427
-
1428
- header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
1429
- if (!header)
1430
- break;
1431
- }
1432
-
1433
- return 0;
1434
-}
1435
-
14361776 #define RAS_DES_EVENT(ss, v) \
14371777 do { \
14381778 dw_pcie_writel_dbi(pcie->pci, cap_base + 8, v); \
....@@ -1443,8 +1783,27 @@
14431783 {
14441784 struct rk_pcie *pcie = s->private;
14451785 int cap_base;
1786
+ u32 val = rk_pcie_readl_apb(pcie, PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN);
1787
+ char *pm;
14461788
1447
- cap_base = rk_pci_find_capability(pcie, PCI_EXT_CAP_ID_VNDR);
1789
+ if (val & BIT(6))
1790
+ pm = "In training";
1791
+ else if (val & BIT(5))
1792
+ pm = "L1.2";
1793
+ else if (val & BIT(4))
1794
+ pm = "L1.1";
1795
+ else if (val & BIT(3))
1796
+ pm = "L1";
1797
+ else if (val & BIT(2))
1798
+ pm = "L0";
1799
+ else if (val & 0x3)
1800
+ pm = (val == 0x3) ? "L0s" : (val & BIT(1) ? "RX L0s" : "TX L0s");
1801
+ else
1802
+ pm = "Invalid";
1803
+
1804
+ seq_printf(s, "Common event signal status: 0x%s\n", pm);
1805
+
1806
+ cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
14481807 if (!cap_base) {
14491808 dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
14501809 return 0;
....@@ -1480,7 +1839,6 @@
14801839
14811840 return 0;
14821841 }
1483
-
14841842 static int rockchip_pcie_rasdes_open(struct inode *inode, struct file *file)
14851843 {
14861844 return single_open(file, rockchip_pcie_rasdes_show,
....@@ -1499,7 +1857,7 @@
14991857 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
15001858 return -EFAULT;
15011859
1502
- cap_base = rk_pci_find_capability(pcie, PCI_EXT_CAP_ID_VNDR);
1860
+ cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
15031861 if (!cap_base) {
15041862 dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
15051863 return 0;
....@@ -1584,7 +1942,6 @@
15841942 const struct rk_pcie_of_data *data;
15851943 enum rk_pcie_device_mode mode;
15861944 struct device_node *np = pdev->dev.of_node;
1587
- struct platform_driver *drv = to_platform_driver(dev->driver);
15881945 u32 val = 0;
15891946 int irq;
15901947
....@@ -1633,10 +1990,13 @@
16331990
16341991 if (!IS_ERR_OR_NULL(rk_pcie->prsnt_gpio)) {
16351992 if (!gpiod_get_value(rk_pcie->prsnt_gpio)) {
1993
+ dev_info(dev, "device isn't present\n");
16361994 ret = -ENODEV;
16371995 goto release_driver;
16381996 }
16391997 }
1998
+
1999
+ rk_pcie->supports_clkreq = device_property_read_bool(dev, "supports-clkreq");
16402000
16412001 retry_regulator:
16422002 /* DON'T MOVE ME: must be enable before phy init */
....@@ -1667,11 +2027,14 @@
16672027 goto disable_vpcie3v3;
16682028 }
16692029
1670
- ret = rk_pcie_reset_control_release(rk_pcie);
1671
- if (ret) {
1672
- dev_err(dev, "reset control init failed\n");
2030
+ rk_pcie->rsts = devm_reset_control_array_get_exclusive(dev);
2031
+ if (IS_ERR(rk_pcie->rsts)) {
2032
+ ret = PTR_ERR(rk_pcie->rsts);
2033
+ dev_err(dev, "failed to get reset lines\n");
16732034 goto disable_phy;
16742035 }
2036
+
2037
+ reset_control_deassert(rk_pcie->rsts);
16752038
16762039 ret = rk_pcie_request_sys_irq(rk_pcie, pdev);
16772040 if (ret) {
....@@ -1723,17 +2086,39 @@
17232086 rk_pcie->is_signal_test = true;
17242087 }
17252088
1726
- /* Force into compliance mode */
1727
- if (device_property_read_bool(dev, "rockchip,compliance-mode")) {
1728
- val = dw_pcie_readl_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS);
1729
- val |= BIT(4);
1730
- dw_pcie_writel_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS, val);
2089
+ /*
2090
+ * Force into compliance mode
2091
+ * comp_prst is a two dimensional array of which the first element
2092
+ * stands for speed mode, and the second one is preset value encoding:
2093
+ * [0] 0->SMA tool control the signal switch, 1/2/3 is for manual Gen setting
2094
+ * [1] transmitter setting for manual Gen setting, valid only if [0] isn't zero.
2095
+ */
2096
+ if (!device_property_read_u32_array(dev, "rockchip,compliance-mode",
2097
+ rk_pcie->comp_prst, 2)) {
2098
+ BUG_ON(rk_pcie->comp_prst[0] > 3 || rk_pcie->comp_prst[1] > 10);
2099
+ if (!rk_pcie->comp_prst[0]) {
2100
+ dev_info(dev, "Auto compliance mode for SMA tool.\n");
2101
+ } else {
2102
+ dev_info(dev, "compliance mode for soldered board Gen%d, P%d.\n",
2103
+ rk_pcie->comp_prst[0], rk_pcie->comp_prst[1]);
2104
+ val = dw_pcie_readl_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS);
2105
+ val |= BIT(4) | rk_pcie->comp_prst[0] | (rk_pcie->comp_prst[1] << 12);
2106
+ dw_pcie_writel_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS, val);
2107
+ }
17312108 rk_pcie->is_signal_test = true;
17322109 }
17332110
17342111 /* Skip waiting for training to pass in system PM routine */
17352112 if (device_property_read_bool(dev, "rockchip,skip-scan-in-resume"))
17362113 rk_pcie->skip_scan_in_resume = true;
2114
+
2115
+ rk_pcie->hot_rst_wq = create_singlethread_workqueue("rk_pcie_hot_rst_wq");
2116
+ if (!rk_pcie->hot_rst_wq) {
2117
+ dev_err(dev, "failed to create hot_rst workqueue\n");
2118
+ ret = -ENOMEM;
2119
+ goto remove_irq_domain;
2120
+ }
2121
+ INIT_WORK(&rk_pcie->hot_rst_work, rk_pcie_hot_rst_work);
17372122
17382123 switch (rk_pcie->mode) {
17392124 case RK_PCIE_RC_TYPE:
....@@ -1748,12 +2133,12 @@
17482133 return 0;
17492134
17502135 if (ret)
1751
- goto remove_irq_domain;
2136
+ goto remove_rst_wq;
17522137
17532138 ret = rk_pcie_init_dma_trx(rk_pcie);
17542139 if (ret) {
17552140 dev_err(dev, "failed to add dma extension\n");
1756
- return ret;
2141
+ goto remove_rst_wq;
17572142 }
17582143
17592144 if (rk_pcie->dma_obj) {
....@@ -1765,13 +2150,15 @@
17652150 /* hold link reset grant after link-up */
17662151 ret = rk_pcie_reset_grant_ctrl(rk_pcie, false);
17672152 if (ret)
1768
- goto remove_irq_domain;
2153
+ goto remove_rst_wq;
17692154 }
17702155
17712156 dw_pcie_dbi_ro_wr_dis(pci);
17722157
17732158 device_init_wakeup(dev, true);
1774
- drv->driver.pm = &rockchip_dw_pcie_pm_ops;
2159
+
2160
+ /* Enable async system PM for multiports SoC */
2161
+ device_enable_async_suspend(dev);
17752162
17762163 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
17772164 ret = rockchip_pcie_debugfs_init(rk_pcie);
....@@ -1779,7 +2166,7 @@
17792166 dev_err(dev, "failed to setup debugfs: %d\n", ret);
17802167
17812168 /* Enable RASDES Error event by default */
1782
- val = rk_pci_find_capability(rk_pcie, PCI_EXT_CAP_ID_VNDR);
2169
+ val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_VNDR);
17832170 if (!val) {
17842171 dev_err(dev, "Not able to find RASDES CAP!\n");
17852172 return 0;
....@@ -1791,6 +2178,8 @@
17912178
17922179 return 0;
17932180
2181
+remove_rst_wq:
2182
+ destroy_workqueue(rk_pcie->hot_rst_wq);
17942183 remove_irq_domain:
17952184 if (rk_pcie->irq_domain)
17962185 irq_domain_remove(rk_pcie->irq_domain);
....@@ -1798,10 +2187,9 @@
17982187 phy_power_off(rk_pcie->phy);
17992188 phy_exit(rk_pcie->phy);
18002189 deinit_clk:
1801
- rk_pcie_clk_deinit(rk_pcie);
2190
+ clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
18022191 disable_vpcie3v3:
18032192 rk_pcie_disable_power(rk_pcie);
1804
-
18052193 release_driver:
18062194 if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT))
18072195 device_release_driver(dev);
....@@ -1826,13 +2214,154 @@
18262214 return rk_pcie_really_probe(pdev);
18272215 }
18282216
2217
#ifdef CONFIG_PCIEASPM
/*
 * Strip (enable == false) or restore (enable == true) the ASPM and L1
 * Substates configuration of the root port and of the device in slot 0 of
 * the root bus, forcing the downstream device to D0 in both directions.
 * The saved state lives in rk_pcie->aspm / rk_pcie->l1ss_ctl1 and is
 * captured on the enable == false pass (system PM .prepare) and written
 * back on the enable == true pass (.complete).
 */
static void rk_pcie_downstream_dev_to_d0(struct rk_pcie *rk_pcie, bool enable)
{
	struct pcie_port *pp = &rk_pcie->pci->pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev, *bridge;
	u32 val;

	/* Find the root bus (the child bus directly below the host bridge) */
	list_for_each_entry(child, &pp->bridge->bus->children, node) {
		if (child->parent == pp->bridge->bus) {
			root_bus = child;
			bridge = root_bus->self;
			break;
		}
	}

	if (!root_bus) {
		dev_err(rk_pcie->pci->dev, "Failed to find downstream devices\n");
		return;
	}

	/* Save and restore root port ASPM/L1SS state */
	if (enable) {
		if (rk_pcie->l1ss_ctl1)
			dw_pcie_writel_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1, rk_pcie->l1ss_ctl1);

		/* rk_pcie->aspm was saved in advance when enable was false */
		dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, rk_pcie->aspm);
	} else {
		val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1);
		if (val & PCI_L1SS_CTL1_L1SS_MASK)
			rk_pcie->l1ss_ctl1 = val;
		else
			rk_pcie->l1ss_ctl1 = 0;

		val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL);
		rk_pcie->aspm = val & PCI_EXP_LNKCTL_ASPMC;
		/*
		 * Clear the ASPM Control field (LNKCTL bits 1:0).  The
		 * previous code masked with PCI_EXP_LNKCAP_ASPM_L0S/L1,
		 * which are LNKCAP bit positions (10:11) and would have
		 * cleared the link bandwidth interrupt enables in LNKCTL
		 * instead of disabling ASPM.
		 */
		val &= ~PCI_EXP_LNKCTL_ASPMC;
		dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, val);
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			/* Force the slot-0 device to D0 before touching its config */
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(rk_pcie->pci->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
			if (enable) {
				/* Restore the endpoint's saved L1SS and ASPM bits */
				if (rk_pcie->l1ss_ctl1) {
					pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, &val);
					val &= ~PCI_L1SS_CTL1_L1SS_MASK;
					val |= (rk_pcie->l1ss_ctl1 & PCI_L1SS_CTL1_L1SS_MASK);
					pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, val);
				}

				pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
								   PCI_EXP_LNKCTL_ASPMC, rk_pcie->aspm);
			} else {
				pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
			}
		}
	}
}
#endif
2282
+
18292283 static int __maybe_unused rockchip_dw_pcie_suspend(struct device *dev)
18302284 {
18312285 struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
1832
- int ret;
2286
+ int ret = 0, power;
2287
+ struct dw_pcie *pci = rk_pcie->pci;
2288
+ u32 status;
2289
+
2290
+ /*
2291
+ * This is as per PCI Express Base r5.0 r1.0 May 22-2019,
2292
+ * 5.2 Link State Power Management (Page #440).
2293
+ *
2294
+ * L2/L3 Ready entry negotiations happen while in the L0 state.
2295
+ * L2/L3 Ready are entered only after the negotiation completes.
2296
+ *
2297
+ * The following example sequence illustrates the multi-step Link state
2298
+ * transition process leading up to entering a system sleep state:
2299
+ * 1. System software directs all Functions of a Downstream component to D3Hot.
2300
+ * 2. The Downstream component then initiates the transition of the Link to L1
2301
+ * as required.
2302
+ * 3. System software then causes the Root Complex to broadcast the PME_Turn_Off
2303
+ * Message in preparation for removing the main power source.
2304
+ * 4. This Message causes the subject Link to transition back to L0 in order to
2305
+ * send it and to enable the Downstream component to respond with PME_TO_Ack.
2306
+ * 5. After sending the PME_TO_Ack, the Downstream component initiates the L2/L3
2307
+ * Ready transition protocol.
2308
+ */
2309
+
2310
+ /* 1. All sub-devices are in D3hot by PCIe stack */
2311
+ dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
18332312
18342313 rk_pcie_link_status_clear(rk_pcie);
2314
+
2315
+ /*
2316
+ * Wlan devices will be shutdown from function driver now, so doing L2 here
2317
+ * must fail. Skip L2 routine.
2318
+ */
2319
+ if (rk_pcie->skip_scan_in_resume) {
2320
+ rfkill_get_wifi_power_state(&power);
2321
+ if (!power)
2322
+ goto no_l2;
2323
+ }
2324
+
2325
+ /* 2. Broadcast PME_Turn_Off Message */
2326
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_MSG_GEN, PME_TURN_OFF);
2327
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_MSG_GEN,
2328
+ status, !(status & BIT(4)), 20, RK_PCIE_L2_TMOUT_US);
2329
+ if (ret) {
2330
+ dev_err(dev, "Failed to send PME_Turn_Off\n");
2331
+ goto no_l2;
2332
+ }
2333
+
2334
+ /* 3. Wait for PME_TO_Ack */
2335
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_INTR_STATUS_MSG_RX,
2336
+ status, status & BIT(9), 20, RK_PCIE_L2_TMOUT_US);
2337
+ if (ret) {
2338
+ dev_err(dev, "Failed to receive PME_TO_Ack\n");
2339
+ goto no_l2;
2340
+ }
2341
+
2342
+ /* 4. Clear PME_TO_Ack and Wait for ready to enter L23 message */
2343
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MSG_RX, PME_TO_ACK);
2344
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_POWER,
2345
+ status, status & READY_ENTER_L23, 20, RK_PCIE_L2_TMOUT_US);
2346
+ if (ret) {
2347
+ dev_err(dev, "Failed to ready to enter L23\n");
2348
+ goto no_l2;
2349
+ }
2350
+
2351
+ /* 5. Check we are in L2 */
2352
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_LTSSM_STATUS,
2353
+ status, ((status & S_MAX) == S_L2_IDLE), 20, RK_PCIE_L2_TMOUT_US);
2354
+ if (ret)
2355
+ dev_err(pci->dev, "Link isn't in L2 idle!\n");
2356
+
2357
+no_l2:
18352358 rk_pcie_disable_ltssm(rk_pcie);
2359
+
2360
+ ret = phy_validate(rk_pcie->phy, PHY_TYPE_PCIE, 0, NULL);
2361
+ if (ret && ret != -EOPNOTSUPP) {
2362
+ dev_err(dev, "PHY is reused by other controller, check the dts!\n");
2363
+ return ret;
2364
+ }
18362365
18372366 /* make sure assert phy success */
18382367 usleep_range(200, 300);
....@@ -1840,7 +2369,9 @@
18402369 phy_power_off(rk_pcie->phy);
18412370 phy_exit(rk_pcie->phy);
18422371
1843
- clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks);
2372
+ rk_pcie->intx = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY);
2373
+
2374
+ clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
18442375
18452376 rk_pcie->in_suspend = true;
18462377
....@@ -1856,20 +2387,25 @@
18562387 bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;
18572388 int ret;
18582389
2390
+ reset_control_assert(rk_pcie->rsts);
2391
+ udelay(10);
2392
+ reset_control_deassert(rk_pcie->rsts);
2393
+
18592394 ret = rk_pcie_enable_power(rk_pcie);
18602395 if (ret)
18612396 return ret;
18622397
1863
- ret = clk_bulk_enable(rk_pcie->clk_cnt, rk_pcie->clks);
2398
+ ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
18642399 if (ret) {
1865
- clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
2400
+ dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret);
18662401 return ret;
18672402 }
18682403
1869
- ret = phy_set_mode(rk_pcie->phy, rk_pcie->phy_mode);
2404
+ ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
2405
+ rk_pcie->phy_sub_mode);
18702406 if (ret) {
18712407 dev_err(dev, "fail to set phy to mode %s, err %d\n",
1872
- (rk_pcie->phy_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
2408
+ (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
18732409 ret);
18742410 return ret;
18752411 }
....@@ -1898,6 +2434,9 @@
18982434
18992435 if (std_rc)
19002436 dw_pcie_setup_rc(&rk_pcie->pci->pp);
2437
+
2438
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
2439
+ rk_pcie->intx | 0xffff0000);
19012440
19022441 ret = rk_pcie_establish_link(rk_pcie->pci);
19032442 if (ret) {
....@@ -1937,7 +2476,33 @@
19372476 return ret;
19382477 }
19392478
2479
+#ifdef CONFIG_PCIEASPM
2480
/*
 * System PM .prepare hook: before suspend, save and strip the ASPM/L1SS
 * configuration of the root port and slot-0 device (captured for restore
 * in .complete) so the link can be brought down cleanly.
 */
static int rockchip_dw_pcie_prepare(struct device *dev)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(dev);

	/* DBI config-space writes require the read-only write enable */
	dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
	rk_pcie_downstream_dev_to_d0(rk_pcie, false);
	dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);

	return 0;
}
2490
+
2491
/*
 * System PM .complete hook: after resume, write back the ASPM/L1SS
 * configuration saved in .prepare.
 */
static void rockchip_dw_pcie_complete(struct device *dev)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(dev);

	/* DBI config-space writes require the read-only write enable */
	dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
	rk_pcie_downstream_dev_to_d0(rk_pcie, true);
	dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
}
2499
+#endif
2500
+
19402501 static const struct dev_pm_ops rockchip_dw_pcie_pm_ops = {
2502
+#ifdef CONFIG_PCIEASPM
2503
+ .prepare = rockchip_dw_pcie_prepare,
2504
+ .complete = rockchip_dw_pcie_complete,
2505
+#endif
19412506 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_dw_pcie_suspend,
19422507 rockchip_dw_pcie_resume)
19432508 };
....@@ -1947,10 +2512,12 @@
19472512 .name = "rk-pcie",
19482513 .of_match_table = rk_pcie_of_match,
19492514 .suppress_bind_attrs = true,
2515
+ .pm = &rockchip_dw_pcie_pm_ops,
19502516 },
2517
+ .probe = rk_pcie_probe,
19512518 };
19522519
1953
/*
 * NOTE(review): registration changed from module_platform_driver_probe()
 * to module_platform_driver() with .probe set in the platform_driver
 * struct — presumably to allow re-probing/deferred probe; confirm.
 */
module_platform_driver(rk_plat_pcie_driver);

MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com>");
MODULE_DESCRIPTION("RockChip PCIe Controller driver");