forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 1f93a7dfd1f8d5ff7a5c53246c7534fe2332d6f4
kernel/drivers/pci/controller/dwc/pcie-dw-rockchip.c
....@@ -14,6 +14,7 @@
1414 #include <linux/gpio.h>
1515 #include <linux/init.h>
1616 #include <linux/interrupt.h>
17
+#include <linux/iopoll.h>
1718 #include <linux/irq.h>
1819 #include <linux/irqchip/chained_irq.h>
1920 #include <linux/irqdomain.h>
....@@ -29,6 +30,7 @@
2930 #include <linux/of_pci.h>
3031 #include <linux/pci.h>
3132 #include <linux/phy/phy.h>
33
+#include <linux/phy/pcie.h>
3234 #include <linux/platform_device.h>
3335 #include <linux/poll.h>
3436 #include <linux/regmap.h>
....@@ -50,15 +52,11 @@
5052 RK_PCIE_RC_TYPE,
5153 };
5254
53
-struct reset_bulk_data {
54
- const char *id;
55
- struct reset_control *rst;
56
-};
57
-
5855 #define RK_PCIE_DBG 0
5956
6057 #define PCIE_DMA_OFFSET 0x380000
6158
59
+#define PCIE_DMA_CTRL_OFF 0x8
6260 #define PCIE_DMA_WR_ENB 0xc
6361 #define PCIE_DMA_WR_CTRL_LO 0x200
6462 #define PCIE_DMA_WR_CTRL_HI 0x204
....@@ -102,6 +100,8 @@
102100
103101 #define PCIE_CAP_LINK_CONTROL2_LINK_STATUS 0xa0
104102
103
+#define PCIE_CLIENT_INTR_STATUS_MSG_RX 0x04
104
+#define PME_TO_ACK (BIT(9) | BIT(25))
105105 #define PCIE_CLIENT_INTR_STATUS_LEGACY 0x08
106106 #define PCIE_CLIENT_INTR_STATUS_MISC 0x10
107107 #define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
....@@ -109,12 +109,17 @@
109109 #define MASK_LEGACY_INT(x) (0x00110011 << x)
110110 #define UNMASK_LEGACY_INT(x) (0x00110000 << x)
111111 #define PCIE_CLIENT_INTR_MASK 0x24
112
+#define PCIE_CLIENT_POWER 0x2c
113
+#define READY_ENTER_L23 BIT(3)
114
+#define PCIE_CLIENT_MSG_GEN 0x34
115
+#define PME_TURN_OFF (BIT(4) | BIT(20))
112116 #define PCIE_CLIENT_GENERAL_DEBUG 0x104
113117 #define PCIE_CLIENT_HOT_RESET_CTRL 0x180
114118 #define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
115119 #define PCIE_CLIENT_LTSSM_STATUS 0x300
116120 #define SMLH_LINKUP BIT(16)
117121 #define RDLH_LINKUP BIT(17)
122
+#define PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN 0x154
118123 #define PCIE_CLIENT_DBG_FIFO_MODE_CON 0x310
119124 #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0 0x320
120125 #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1 0x324
....@@ -122,20 +127,30 @@
122127 #define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1 0x32c
123128 #define PCIE_CLIENT_DBG_FIFO_STATUS 0x350
124129 #define PCIE_CLIENT_DBG_TRANSITION_DATA 0xffff0000
125
-#define PCIE_CLIENT_DBF_EN 0xffff0003
130
+#define PCIE_CLIENT_DBF_EN 0xffff0007
126131
127132 #define PCIE_PHY_LINKUP BIT(0)
128133 #define PCIE_DATA_LINKUP BIT(1)
129134
130
-#define PCIE_RESBAR_CTRL_REG0_REG 0x2a8
135
+#define PCIE_TYPE0_HDR_DBI2_OFFSET 0x100000
131136 #define PCIE_SB_BAR0_MASK_REG 0x100010
132137
133138 #define PCIE_PL_ORDER_RULE_CTRL_OFF 0x8B4
139
+#define RK_PCIE_L2_TMOUT_US 5000
140
+
141
+enum rk_pcie_ltssm_code {
142
+ S_L0 = 0x11,
143
+ S_L0S = 0x12,
144
+ S_L1_IDLE = 0x14,
145
+ S_L2_IDLE = 0x15,
146
+ S_MAX = 0x1f,
147
+};
134148
135149 struct rk_pcie {
136150 struct dw_pcie *pci;
137151 enum rk_pcie_device_mode mode;
138152 enum phy_mode phy_mode;
153
+ int phy_sub_mode;
139154 unsigned char bar_to_atu[6];
140155 phys_addr_t *outbound_addr;
141156 unsigned long *ib_window_map;
....@@ -146,9 +161,10 @@
146161 void __iomem *apb_base;
147162 struct phy *phy;
148163 struct clk_bulk_data *clks;
164
+ struct reset_control *rsts;
149165 unsigned int clk_cnt;
150
- struct reset_bulk_data *rsts;
151166 struct gpio_desc *rst_gpio;
167
+ u32 perst_inactive_ms;
152168 struct gpio_desc *prsnt_gpio;
153169 phys_addr_t mem_start;
154170 size_t mem_size;
....@@ -157,13 +173,16 @@
157173 struct regmap *pmu_grf;
158174 struct dma_trx_obj *dma_obj;
159175 bool in_suspend;
160
- bool skip_scan_in_resume;
176
+ bool skip_scan_in_resume;
161177 bool is_rk1808;
162178 bool is_signal_test;
163179 bool bifurcation;
180
+ bool supports_clkreq;
164181 struct regulator *vpcie3v3;
165182 struct irq_domain *irq_domain;
166183 raw_spinlock_t intx_lock;
184
+ u16 aspm;
185
+ u32 l1ss_ctl1;
167186 struct dentry *debugfs;
168187 u32 msi_vector_num;
169188 };
....@@ -174,7 +193,6 @@
174193 };
175194
176195 #define to_rk_pcie(x) dev_get_drvdata((x)->dev)
177
-static const struct dev_pm_ops rockchip_dw_pcie_pm_ops;
178196
179197 static int rk_pcie_read(void __iomem *addr, int size, u32 *val)
180198 {
....@@ -259,12 +277,154 @@
259277 return 0;
260278 }
261279
280
+static void rk_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
281
+{
282
+ int ret;
283
+
284
+ if (pci->ops->write_dbi) {
285
+ pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
286
+ return;
287
+ }
288
+
289
+ ret = dw_pcie_write(pci->atu_base + reg, 4, val);
290
+ if (ret)
291
+ dev_err(pci->dev, "Write ATU address failed\n");
292
+}
293
+
294
+static void rk_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
295
+ u32 val)
296
+{
297
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
298
+
299
+ rk_pcie_writel_atu(pci, offset + reg, val);
300
+}
301
+
302
+static u32 rk_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
303
+{
304
+ int ret;
305
+ u32 val;
306
+
307
+ if (pci->ops->read_dbi)
308
+ return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
309
+
310
+ ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
311
+ if (ret)
312
+ dev_err(pci->dev, "Read ATU address failed\n");
313
+
314
+ return val;
315
+}
316
+
317
+static u32 rk_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
318
+{
319
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
320
+
321
+ return rk_pcie_readl_atu(pci, offset + reg);
322
+}
323
+
324
+static int rk_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
325
+ int index, int bar, u64 cpu_addr,
326
+ enum dw_pcie_as_type as_type)
327
+{
328
+ int type;
329
+ u32 retries, val;
330
+
331
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
332
+ lower_32_bits(cpu_addr));
333
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
334
+ upper_32_bits(cpu_addr));
335
+
336
+ switch (as_type) {
337
+ case DW_PCIE_AS_MEM:
338
+ type = PCIE_ATU_TYPE_MEM;
339
+ break;
340
+ case DW_PCIE_AS_IO:
341
+ type = PCIE_ATU_TYPE_IO;
342
+ break;
343
+ default:
344
+ return -EINVAL;
345
+ }
346
+
347
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
348
+ PCIE_ATU_FUNC_NUM(func_no));
349
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
350
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
351
+ PCIE_ATU_ENABLE |
352
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
353
+
354
+ /*
355
+ * Make sure ATU enable takes effect before any subsequent config
356
+ * and I/O accesses.
357
+ */
358
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
359
+ val = rk_pcie_readl_ib_unroll(pci, index,
360
+ PCIE_ATU_UNR_REGION_CTRL2);
361
+ if (val & PCIE_ATU_ENABLE)
362
+ return 0;
363
+
364
+ mdelay(LINK_WAIT_IATU);
365
+ }
366
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
367
+
368
+ return -EBUSY;
369
+}
370
+
371
+
372
+static int rk_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
373
+ int bar, u64 cpu_addr,
374
+ enum dw_pcie_as_type as_type)
375
+{
376
+ int type;
377
+ u32 retries, val;
378
+
379
+ if (pci->iatu_unroll_enabled)
380
+ return rk_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
381
+ cpu_addr, as_type);
382
+
383
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
384
+ index);
385
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
386
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
387
+
388
+ switch (as_type) {
389
+ case DW_PCIE_AS_MEM:
390
+ type = PCIE_ATU_TYPE_MEM;
391
+ break;
392
+ case DW_PCIE_AS_IO:
393
+ type = PCIE_ATU_TYPE_IO;
394
+ break;
395
+ default:
396
+ return -EINVAL;
397
+ }
398
+
399
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
400
+ PCIE_ATU_FUNC_NUM(func_no));
401
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
402
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
403
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
404
+
405
+ /*
406
+ * Make sure ATU enable takes effect before any subsequent config
407
+ * and I/O accesses.
408
+ */
409
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
410
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
411
+ if (val & PCIE_ATU_ENABLE)
412
+ return 0;
413
+
414
+ mdelay(LINK_WAIT_IATU);
415
+ }
416
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
417
+
418
+ return -EBUSY;
419
+}
420
+
262421 static int rk_pcie_ep_inbound_atu(struct rk_pcie *rk_pcie,
263422 enum pci_barno bar, dma_addr_t cpu_addr,
264423 enum dw_pcie_as_type as_type)
265424 {
266425 int ret;
267426 u32 free_win;
427
+ u8 func_no = 0x0;
268428
269429 if (rk_pcie->in_suspend) {
270430 free_win = rk_pcie->bar_to_atu[bar];
....@@ -277,8 +437,8 @@
277437 }
278438 }
279439
280
- ret = dw_pcie_prog_inbound_atu(rk_pcie->pci, free_win, bar, cpu_addr,
281
- as_type);
440
+ ret = rk_pcie_prog_inbound_atu(rk_pcie->pci, func_no, free_win, bar,
441
+ cpu_addr, as_type);
282442 if (ret < 0) {
283443 dev_err(rk_pcie->pci->dev, "Failed to program IB window\n");
284444 return ret;
....@@ -291,6 +451,105 @@
291451 set_bit(free_win, rk_pcie->ib_window_map);
292452
293453 return 0;
454
+}
455
+
456
+static void rk_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
457
+ u32 val)
458
+{
459
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
460
+
461
+ rk_pcie_writel_atu(pci, offset + reg, val);
462
+}
463
+
464
+static u32 rk_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
465
+{
466
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
467
+
468
+ return rk_pcie_readl_atu(pci, offset + reg);
469
+}
470
+
471
+static void rk_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
472
+ int index, int type,
473
+ u64 cpu_addr, u64 pci_addr,
474
+ u32 size)
475
+{
476
+ u32 retries, val;
477
+ u64 limit_addr = cpu_addr + size - 1;
478
+
479
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
480
+ lower_32_bits(cpu_addr));
481
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
482
+ upper_32_bits(cpu_addr));
483
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
484
+ lower_32_bits(limit_addr));
485
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
486
+ upper_32_bits(limit_addr));
487
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
488
+ lower_32_bits(pci_addr));
489
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
490
+ upper_32_bits(pci_addr));
491
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
492
+ type | PCIE_ATU_FUNC_NUM(func_no));
493
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
494
+ PCIE_ATU_ENABLE);
495
+
496
+ /*
497
+ * Make sure ATU enable takes effect before any subsequent config
498
+ * and I/O accesses.
499
+ */
500
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
501
+ val = rk_pcie_readl_ob_unroll(pci, index,
502
+ PCIE_ATU_UNR_REGION_CTRL2);
503
+ if (val & PCIE_ATU_ENABLE)
504
+ return;
505
+
506
+ mdelay(LINK_WAIT_IATU);
507
+ }
508
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
509
+}
510
+
511
+static void rk_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
512
+ int type, u64 cpu_addr, u64 pci_addr, u32 size)
513
+{
514
+ u32 retries, val;
515
+
516
+ if (pci->ops->cpu_addr_fixup)
517
+ cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
518
+
519
+ if (pci->iatu_unroll_enabled) {
520
+ rk_pcie_prog_outbound_atu_unroll(pci, 0x0, index, type,
521
+ cpu_addr, pci_addr, size);
522
+ return;
523
+ }
524
+
525
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
526
+ PCIE_ATU_REGION_OUTBOUND | index);
527
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
528
+ lower_32_bits(cpu_addr));
529
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
530
+ upper_32_bits(cpu_addr));
531
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
532
+ lower_32_bits(cpu_addr + size - 1));
533
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
534
+ lower_32_bits(pci_addr));
535
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
536
+ upper_32_bits(pci_addr));
537
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
538
+ PCIE_ATU_FUNC_NUM(0x0));
539
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
540
+
541
+ /*
542
+ * Make sure ATU enable takes effect before any subsequent config
543
+ * and I/O accesses.
544
+ */
545
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
546
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
547
+ if (val & PCIE_ATU_ENABLE)
548
+ return;
549
+
550
+ mdelay(LINK_WAIT_IATU);
551
+ }
552
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
294553 }
295554
296555 static int rk_pcie_ep_outbound_atu(struct rk_pcie *rk_pcie,
....@@ -311,7 +570,7 @@
311570 }
312571 }
313572
314
- dw_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM,
573
+ rk_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM,
315574 phys_addr, pci_addr, size);
316575
317576 if (rk_pcie->in_suspend)
....@@ -368,6 +627,28 @@
368627 return 0;
369628 }
370629
630
+#if defined(CONFIG_PCIEASPM)
631
+static void disable_aspm_l1ss(struct rk_pcie *rk_pcie)
632
+{
633
+ u32 val, cfg_link_cap_l1sub;
634
+
635
+ val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_L1SS);
636
+ if (!val) {
637
+ dev_err(rk_pcie->pci->dev, "can't find l1ss cap\n");
638
+
639
+ return;
640
+ }
641
+
642
+ cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
643
+
644
+ val = dw_pcie_readl_dbi(rk_pcie->pci, cfg_link_cap_l1sub);
645
+ val &= ~(PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2 | PCI_L1SS_CAP_L1_PM_SS);
646
+ dw_pcie_writel_dbi(rk_pcie->pci, cfg_link_cap_l1sub, val);
647
+}
648
+#else
649
+static inline void disable_aspm_l1ss(struct rk_pcie *rk_pcie) { return; }
650
+#endif
651
+
371652 static inline void rk_pcie_set_mode(struct rk_pcie *rk_pcie)
372653 {
373654 switch (rk_pcie->mode) {
....@@ -375,6 +656,14 @@
375656 rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00000);
376657 break;
377658 case RK_PCIE_RC_TYPE:
659
+ if (rk_pcie->supports_clkreq) {
660
+ /* Application is ready to have reference clock removed */
661
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x00010001);
662
+ } else {
663
+ /* Pull down CLKREQ# to assert the connecting CLOCK_GEN OE */
664
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x30011000);
665
+ disable_aspm_l1ss(rk_pcie);
666
+ }
378667 rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00040);
379668 /*
380669 * Disable order rule for CPL can't pass halted P queue.
....@@ -411,8 +700,7 @@
411700
412701 if (rk_pcie->is_rk1808) {
413702 val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG);
414
- if ((val & (PCIE_PHY_LINKUP | PCIE_DATA_LINKUP)) == 0x3 &&
415
- ((val & GENMASK(15, 10)) >> 10) == 0x11)
703
+ if ((val & (PCIE_PHY_LINKUP | PCIE_DATA_LINKUP)) == 0x3)
416704 return 1;
417705 } else {
418706 val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS);
....@@ -425,7 +713,8 @@
425713
426714 static void rk_pcie_enable_debug(struct rk_pcie *rk_pcie)
427715 {
428
-#if RK_PCIE_DBG
716
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
717
+ return;
429718 if (rk_pcie->is_rk1808 == true)
430719 return;
431720
....@@ -439,7 +728,6 @@
439728 PCIE_CLIENT_DBG_TRANSITION_DATA);
440729 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_MODE_CON,
441730 PCIE_CLIENT_DBF_EN);
442
-#endif
443731 }
444732
445733 static void rk_pcie_debug_dump(struct rk_pcie *rk_pcie)
....@@ -472,6 +760,9 @@
472760 return 0;
473761 }
474762
763
+ /* Rest the device */
764
+ gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);
765
+
475766 rk_pcie_disable_ltssm(rk_pcie);
476767 rk_pcie_link_status_clear(rk_pcie);
477768 rk_pcie_enable_debug(rk_pcie);
....@@ -502,18 +793,19 @@
502793 * PERST and T_PVPERL (Power stable to PERST# inactive) should be a
503794 * minimum of 100ms. See table 2-4 in section 2.6.2 AC, the PCI Express
504795 * Card Electromechanical Specification 3.0. So 100ms in total is the min
505
- * requuirement here. We add a 1s for sake of hoping everthings work fine.
796
+ * requuirement here. We add a 200ms by default for sake of hoping everthings
797
+ * work fine. If it doesn't, please add more in DT node by add rockchip,perst-inactive-ms.
506798 */
507
- msleep(1000);
799
+ msleep(rk_pcie->perst_inactive_ms);
508800 gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
509801
510802 /*
511803 * Add this 1ms delay because we observe link is always up stably after it and
512804 * could help us save 20ms for scanning devices.
513805 */
514
- usleep_range(1000, 1100);
806
+ usleep_range(1000, 1100);
515807
516
- for (retries = 0; retries < 10; retries++) {
808
+ for (retries = 0; retries < 100; retries++) {
517809 if (dw_pcie_link_up(pci)) {
518810 /*
519811 * We may be here in case of L0 in Gen1. But if EP is capable
....@@ -522,17 +814,20 @@
522814 * that LTSSM max timeout is 24ms per period, we can wait a bit
523815 * more for Gen switch.
524816 */
525
- msleep(100);
526
- dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n",
527
- rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
528
- rk_pcie_debug_dump(rk_pcie);
529
- return 0;
817
+ msleep(50);
818
+ /* In case link drop after linkup, double check it */
819
+ if (dw_pcie_link_up(pci)) {
820
+ dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n",
821
+ rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
822
+ rk_pcie_debug_dump(rk_pcie);
823
+ return 0;
824
+ }
530825 }
531826
532827 dev_info_ratelimited(pci->dev, "PCIe Linking... LTSSM is 0x%x\n",
533828 rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
534829 rk_pcie_debug_dump(rk_pcie);
535
- msleep(1000);
830
+ msleep(20);
536831 }
537832
538833 dev_err(pci->dev, "PCIe Link Fail\n");
....@@ -540,12 +835,23 @@
540835 return rk_pcie->is_signal_test == true ? 0 : -EINVAL;
541836 }
542837
838
+static bool rk_pcie_udma_enabled(struct rk_pcie *rk_pcie)
839
+{
840
+ return dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
841
+ PCIE_DMA_CTRL_OFF);
842
+}
843
+
543844 static int rk_pcie_init_dma_trx(struct rk_pcie *rk_pcie)
544845 {
846
+ if (!rk_pcie_udma_enabled(rk_pcie))
847
+ return 0;
848
+
545849 rk_pcie->dma_obj = rk_pcie_dma_obj_probe(rk_pcie->pci->dev);
546850 if (IS_ERR(rk_pcie->dma_obj)) {
547851 dev_err(rk_pcie->pci->dev, "failed to prepare dma object\n");
548852 return -EINVAL;
853
+ } else if (rk_pcie->dma_obj) {
854
+ goto out;
549855 }
550856
551857 rk_pcie->dma_obj = pcie_dw_dmatest_register(rk_pcie->pci->dev, true);
....@@ -553,7 +859,7 @@
553859 dev_err(rk_pcie->pci->dev, "failed to prepare dmatest\n");
554860 return -EINVAL;
555861 }
556
-
862
+out:
557863 /* Enable client write and read interrupt */
558864 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);
559865
....@@ -566,6 +872,76 @@
566872 return 0;
567873 }
568874
875
+static int rk_pci_find_resbar_capability(struct rk_pcie *rk_pcie)
876
+{
877
+ u32 header;
878
+ int ttl;
879
+ int start = 0;
880
+ int pos = PCI_CFG_SPACE_SIZE;
881
+ int cap = PCI_EXT_CAP_ID_REBAR;
882
+
883
+ /* minimum 8 bytes per capability */
884
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
885
+
886
+ header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
887
+
888
+ /*
889
+ * If we have no capabilities, this is indicated by cap ID,
890
+ * cap version and next pointer all being 0.
891
+ */
892
+ if (header == 0)
893
+ return 0;
894
+
895
+ while (ttl-- > 0) {
896
+ if (PCI_EXT_CAP_ID(header) == cap && pos != start)
897
+ return pos;
898
+
899
+ pos = PCI_EXT_CAP_NEXT(header);
900
+ if (pos < PCI_CFG_SPACE_SIZE)
901
+ break;
902
+
903
+ header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
904
+ if (!header)
905
+ break;
906
+ }
907
+
908
+ return 0;
909
+}
910
+
911
+#ifdef MODULE
912
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
913
+{
914
+ int ret;
915
+
916
+ if (pci->ops && pci->ops->write_dbi2) {
917
+ pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
918
+ return;
919
+ }
920
+
921
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
922
+ if (ret)
923
+ dev_err(pci->dev, "write DBI address failed\n");
924
+}
925
+#endif
926
+
927
+static int rk_pcie_ep_set_bar_flag(struct rk_pcie *rk_pcie, enum pci_barno barno, int flags)
928
+{
929
+ enum pci_barno bar = barno;
930
+ u32 reg;
931
+
932
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
933
+
934
+ /* Disabled the upper 32bits BAR to make a 64bits bar pair */
935
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
936
+ dw_pcie_writel_dbi2(rk_pcie->pci, reg + 4, 0);
937
+
938
+ dw_pcie_writel_dbi(rk_pcie->pci, reg, flags);
939
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
940
+ dw_pcie_writel_dbi(rk_pcie->pci, reg + 4, 0);
941
+
942
+ return 0;
943
+}
944
+
569945 static void rk_pcie_ep_setup(struct rk_pcie *rk_pcie)
570946 {
571947 int ret;
....@@ -573,6 +949,8 @@
573949 u32 lanes;
574950 struct device *dev = rk_pcie->pci->dev;
575951 struct device_node *np = dev->of_node;
952
+ int resbar_base;
953
+ int bar;
576954
577955 /* Enable client write and read interrupt */
578956 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);
....@@ -636,17 +1014,36 @@
6361014 /* Enable bus master and memory space */
6371015 dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_STATUS_COMMAND_REG, 0x6);
6381016
639
- /* Resize BAR0 to 4GB */
640
- /* bit13-8 set to 6 means 64MB */
641
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_RESBAR_CTRL_REG0_REG, 0x600);
1017
+ resbar_base = rk_pci_find_resbar_capability(rk_pcie);
1018
+ if (!resbar_base) {
1019
+ dev_warn(dev, "failed to find resbar_base\n");
1020
+ } else {
1021
+ /* Resize BAR0 to support 512GB, BAR1 to support 8M, BAR2~5 to support 64M */
1022
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4, 0xfffff0);
1023
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8, 0x13c0);
1024
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0xc, 0xfffff0);
1025
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x10, 0x3c0);
1026
+ for (bar = 2; bar < 6; bar++) {
1027
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
1028
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8 + bar * 0x8, 0x6c0);
1029
+ }
6421030
643
- /* Set shadow BAR0 according 64MB */
644
- val = rk_pcie->mem_size - 1;
645
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val);
1031
+ /* Set flags */
1032
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_0, PCI_BASE_ADDRESS_MEM_TYPE_32);
1033
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_1, PCI_BASE_ADDRESS_MEM_TYPE_32);
1034
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_2, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
1035
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_4, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
1036
+ }
6461037
647
- /* Set reserved memory address to BAR0 */
648
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_BAR0_REG,
649
- rk_pcie->mem_start);
1038
+ /* Device id and class id needed for request bar address */
1039
+ dw_pcie_writew_dbi(rk_pcie->pci, PCI_DEVICE_ID, 0x356a);
1040
+ dw_pcie_writew_dbi(rk_pcie->pci, PCI_CLASS_DEVICE, 0x0580);
1041
+
1042
+ /* Set shadow BAR0 */
1043
+ if (rk_pcie->is_rk1808) {
1044
+ val = rk_pcie->mem_size - 1;
1045
+ dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val);
1046
+ }
6501047 }
6511048
6521049 static int rk_pcie_ep_win_parse(struct rk_pcie *rk_pcie)
....@@ -755,9 +1152,6 @@
7551152
7561153 pp->ops = &rk_pcie_host_ops;
7571154
758
- if (device_property_read_bool(dev, "msi-map"))
759
- pp->msi_ext = 1;
760
-
7611155 ret = dw_pcie_host_init(pp);
7621156 if (ret) {
7631157 dev_err(dev, "failed to initialize host\n");
....@@ -796,6 +1190,8 @@
7961190 return ret;
7971191 }
7981192
1193
+ rk_pcie->pci->dbi_base2 = rk_pcie->pci->dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET;
1194
+ rk_pcie->pci->atu_base = rk_pcie->pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
7991195 rk_pcie->pci->iatu_unroll_enabled = rk_pcie_iatu_unroll_enabled(rk_pcie->pci);
8001196
8011197 ret = rk_pcie_ep_atu_init(rk_pcie);
....@@ -812,52 +1208,24 @@
8121208 return ret;
8131209 }
8141210
815
- return 0;
816
-}
1211
+ if (!rk_pcie_udma_enabled(rk_pcie))
1212
+ return 0;
8171213
818
-static void rk_pcie_clk_deinit(struct rk_pcie *rk_pcie)
819
-{
820
- clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks);
821
- clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
1214
+ return 0;
8221215 }
8231216
8241217 static int rk_pcie_clk_init(struct rk_pcie *rk_pcie)
8251218 {
8261219 struct device *dev = rk_pcie->pci->dev;
827
- struct property *prop;
828
- const char *name;
829
- int i = 0, ret, count;
1220
+ int ret;
8301221
831
- count = of_property_count_strings(dev->of_node, "clock-names");
832
- if (count < 1)
1222
+ rk_pcie->clk_cnt = devm_clk_bulk_get_all(dev, &rk_pcie->clks);
1223
+ if (rk_pcie->clk_cnt < 1)
8331224 return -ENODEV;
8341225
835
- rk_pcie->clks = devm_kcalloc(dev, count,
836
- sizeof(struct clk_bulk_data),
837
- GFP_KERNEL);
838
- if (!rk_pcie->clks)
839
- return -ENOMEM;
840
-
841
- rk_pcie->clk_cnt = count;
842
-
843
- of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
844
- rk_pcie->clks[i].id = name;
845
- if (!rk_pcie->clks[i].id)
846
- return -ENOMEM;
847
- i++;
848
- }
849
-
850
- ret = devm_clk_bulk_get(dev, count, rk_pcie->clks);
851
- if (ret)
852
- return ret;
853
-
854
- ret = clk_bulk_prepare(count, rk_pcie->clks);
855
- if (ret)
856
- return ret;
857
-
858
- ret = clk_bulk_enable(count, rk_pcie->clks);
1226
+ ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
8591227 if (ret) {
860
- clk_bulk_unprepare(count, rk_pcie->clks);
1228
+ dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret);
8611229 return ret;
8621230 }
8631231
....@@ -908,6 +1276,10 @@
9081276 return PTR_ERR(rk_pcie->rst_gpio);
9091277 }
9101278
1279
+ if (device_property_read_u32(&pdev->dev, "rockchip,perst-inactive-ms",
1280
+ &rk_pcie->perst_inactive_ms))
1281
+ rk_pcie->perst_inactive_ms = 200;
1282
+
9111283 rk_pcie->prsnt_gpio = devm_gpiod_get_optional(&pdev->dev, "prsnt", GPIOD_IN);
9121284 if (IS_ERR_OR_NULL(rk_pcie->prsnt_gpio))
9131285 dev_info(&pdev->dev, "invalid prsnt-gpios property in node\n");
....@@ -920,7 +1292,7 @@
9201292 int ret;
9211293 struct device *dev = rk_pcie->pci->dev;
9221294
923
- rk_pcie->phy = devm_phy_get(dev, "pcie-phy");
1295
+ rk_pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
9241296 if (IS_ERR(rk_pcie->phy)) {
9251297 if (PTR_ERR(rk_pcie->phy) != -EPROBE_DEFER)
9261298 dev_info(dev, "missing phy\n");
....@@ -929,23 +1301,27 @@
9291301
9301302 switch (rk_pcie->mode) {
9311303 case RK_PCIE_RC_TYPE:
932
- rk_pcie->phy_mode = PHY_MODE_PCIE_RC;
1304
+ rk_pcie->phy_mode = PHY_MODE_PCIE; /* make no sense */
1305
+ rk_pcie->phy_sub_mode = PHY_MODE_PCIE_RC;
9331306 break;
9341307 case RK_PCIE_EP_TYPE:
935
- rk_pcie->phy_mode = PHY_MODE_PCIE_EP;
1308
+ rk_pcie->phy_mode = PHY_MODE_PCIE;
1309
+ rk_pcie->phy_sub_mode = PHY_MODE_PCIE_EP;
9361310 break;
9371311 }
9381312
939
- ret = phy_set_mode(rk_pcie->phy, rk_pcie->phy_mode);
1313
+ ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
1314
+ rk_pcie->phy_sub_mode);
9401315 if (ret) {
9411316 dev_err(dev, "fail to set phy to mode %s, err %d\n",
942
- (rk_pcie->phy_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
1317
+ (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
9431318 ret);
9441319 return ret;
9451320 }
9461321
9471322 if (rk_pcie->bifurcation)
948
- ret = phy_set_mode(rk_pcie->phy, PHY_MODE_PCIE_BIFURCATION);
1323
+ phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
1324
+ PHY_MODE_PCIE_BIFURCATION);
9491325
9501326 ret = phy_init(rk_pcie->phy);
9511327 if (ret < 0) {
....@@ -954,53 +1330,6 @@
9541330 }
9551331
9561332 phy_power_on(rk_pcie->phy);
957
-
958
- return 0;
959
-}
960
-
961
-static int rk_pcie_reset_control_release(struct rk_pcie *rk_pcie)
962
-{
963
- struct device *dev = rk_pcie->pci->dev;
964
- struct property *prop;
965
- const char *name;
966
- int ret, count, i = 0;
967
-
968
- count = of_property_count_strings(dev->of_node, "reset-names");
969
- if (count < 1)
970
- return -ENODEV;
971
-
972
- rk_pcie->rsts = devm_kcalloc(dev, count,
973
- sizeof(struct reset_bulk_data),
974
- GFP_KERNEL);
975
- if (!rk_pcie->rsts)
976
- return -ENOMEM;
977
-
978
- of_property_for_each_string(dev->of_node, "reset-names",
979
- prop, name) {
980
- rk_pcie->rsts[i].id = name;
981
- if (!rk_pcie->rsts[i].id)
982
- return -ENOMEM;
983
- i++;
984
- }
985
-
986
- for (i = 0; i < count; i++) {
987
- rk_pcie->rsts[i].rst = devm_reset_control_get_exclusive(dev,
988
- rk_pcie->rsts[i].id);
989
- if (IS_ERR_OR_NULL(rk_pcie->rsts[i].rst)) {
990
- dev_err(dev, "failed to get %s\n",
991
- rk_pcie->clks[i].id);
992
- return -PTR_ERR(rk_pcie->rsts[i].rst);
993
- }
994
- }
995
-
996
- for (i = 0; i < count; i++) {
997
- ret = reset_control_deassert(rk_pcie->rsts[i].rst);
998
- if (ret) {
999
- dev_err(dev, "failed to release %s\n",
1000
- rk_pcie->rsts[i].id);
1001
- return ret;
1002
- }
1003
- }
10041333
10051334 return 0;
10061335 }
....@@ -1215,11 +1544,23 @@
12151544 .data = &rk3528_pcie_rc_of_data,
12161545 },
12171546 {
1547
+ .compatible = "rockchip,rk3562-pcie",
1548
+ .data = &rk3528_pcie_rc_of_data,
1549
+ },
1550
+ {
12181551 .compatible = "rockchip,rk3568-pcie",
12191552 .data = &rk_pcie_rc_of_data,
12201553 },
12211554 {
12221555 .compatible = "rockchip,rk3568-pcie-ep",
1556
+ .data = &rk_pcie_ep_of_data,
1557
+ },
1558
+ {
1559
+ .compatible = "rockchip,rk3588-pcie",
1560
+ .data = &rk_pcie_rc_of_data,
1561
+ },
1562
+ {
1563
+ .compatible = "rockchip,rk3588-pcie-ep",
12231564 .data = &rk_pcie_ep_of_data,
12241565 },
12251566 {},
....@@ -1398,41 +1739,6 @@
13981739 return ret;
13991740 }
14001741
1401
-static int rk_pci_find_capability(struct rk_pcie *rk_pcie, int cap)
1402
-{
1403
- u32 header;
1404
- int ttl;
1405
- int start = 0;
1406
- int pos = PCI_CFG_SPACE_SIZE;
1407
-
1408
- /* minimum 8 bytes per capability */
1409
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
1410
-
1411
- header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
1412
-
1413
- /*
1414
- * If we have no capabilities, this is indicated by cap ID,
1415
- * cap version and next pointer all being 0.
1416
- */
1417
- if (header == 0)
1418
- return 0;
1419
-
1420
- while (ttl-- > 0) {
1421
- if (PCI_EXT_CAP_ID(header) == cap && pos != start)
1422
- return pos;
1423
-
1424
- pos = PCI_EXT_CAP_NEXT(header);
1425
- if (pos < PCI_CFG_SPACE_SIZE)
1426
- break;
1427
-
1428
- header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
1429
- if (!header)
1430
- break;
1431
- }
1432
-
1433
- return 0;
1434
-}
1435
-
14361742 #define RAS_DES_EVENT(ss, v) \
14371743 do { \
14381744 dw_pcie_writel_dbi(pcie->pci, cap_base + 8, v); \
....@@ -1443,8 +1749,27 @@
14431749 {
14441750 struct rk_pcie *pcie = s->private;
14451751 int cap_base;
1752
+ u32 val = rk_pcie_readl_apb(pcie, PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN);
1753
+ char *pm;
14461754
1447
- cap_base = rk_pci_find_capability(pcie, PCI_EXT_CAP_ID_VNDR);
1755
+ if (val & BIT(6))
1756
+ pm = "In training";
1757
+ else if (val & BIT(5))
1758
+ pm = "L1.2";
1759
+ else if (val & BIT(4))
1760
+ pm = "L1.1";
1761
+ else if (val & BIT(3))
1762
+ pm = "L1";
1763
+ else if (val & BIT(2))
1764
+ pm = "L0";
1765
+ else if (val & 0x3)
1766
+ pm = (val == 0x3) ? "L0s" : (val & BIT(1) ? "RX L0s" : "TX L0s");
1767
+ else
1768
+ pm = "Invalid";
1769
+
1770
+ seq_printf(s, "Common event signal status: 0x%s\n", pm);
1771
+
1772
+ cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
14481773 if (!cap_base) {
14491774 dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
14501775 return 0;
....@@ -1480,7 +1805,6 @@
14801805
14811806 return 0;
14821807 }
1483
-
14841808 static int rockchip_pcie_rasdes_open(struct inode *inode, struct file *file)
14851809 {
14861810 return single_open(file, rockchip_pcie_rasdes_show,
....@@ -1499,7 +1823,7 @@
14991823 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
15001824 return -EFAULT;
15011825
1502
- cap_base = rk_pci_find_capability(pcie, PCI_EXT_CAP_ID_VNDR);
1826
+ cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
15031827 if (!cap_base) {
15041828 dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
15051829 return 0;
....@@ -1584,7 +1908,6 @@
15841908 const struct rk_pcie_of_data *data;
15851909 enum rk_pcie_device_mode mode;
15861910 struct device_node *np = pdev->dev.of_node;
1587
- struct platform_driver *drv = to_platform_driver(dev->driver);
15881911 u32 val = 0;
15891912 int irq;
15901913
....@@ -1638,6 +1961,8 @@
16381961 }
16391962 }
16401963
1964
+ rk_pcie->supports_clkreq = device_property_read_bool(dev, "supports-clkreq");
1965
+
16411966 retry_regulator:
16421967 /* DON'T MOVE ME: must be enable before phy init */
16431968 rk_pcie->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
....@@ -1667,11 +1992,14 @@
16671992 goto disable_vpcie3v3;
16681993 }
16691994
1670
- ret = rk_pcie_reset_control_release(rk_pcie);
1671
- if (ret) {
1672
- dev_err(dev, "reset control init failed\n");
1995
+ rk_pcie->rsts = devm_reset_control_array_get_exclusive(dev);
1996
+ if (IS_ERR(rk_pcie->rsts)) {
1997
+ ret = PTR_ERR(rk_pcie->rsts);
1998
+ dev_err(dev, "failed to get reset lines\n");
16731999 goto disable_phy;
16742000 }
2001
+
2002
+ reset_control_deassert(rk_pcie->rsts);
16752003
16762004 ret = rk_pcie_request_sys_irq(rk_pcie, pdev);
16772005 if (ret) {
....@@ -1753,7 +2081,7 @@
17532081 ret = rk_pcie_init_dma_trx(rk_pcie);
17542082 if (ret) {
17552083 dev_err(dev, "failed to add dma extension\n");
1756
- return ret;
2084
+ goto remove_irq_domain;
17572085 }
17582086
17592087 if (rk_pcie->dma_obj) {
....@@ -1771,7 +2099,9 @@
17712099 dw_pcie_dbi_ro_wr_dis(pci);
17722100
17732101 device_init_wakeup(dev, true);
1774
- drv->driver.pm = &rockchip_dw_pcie_pm_ops;
2102
+
2103
+ /* Enable async system PM for multiports SoC */
2104
+ device_enable_async_suspend(dev);
17752105
17762106 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
17772107 ret = rockchip_pcie_debugfs_init(rk_pcie);
....@@ -1779,7 +2109,7 @@
17792109 dev_err(dev, "failed to setup debugfs: %d\n", ret);
17802110
17812111 /* Enable RASDES Error event by default */
1782
- val = rk_pci_find_capability(rk_pcie, PCI_EXT_CAP_ID_VNDR);
2112
+ val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_VNDR);
17832113 if (!val) {
17842114 dev_err(dev, "Not able to find RASDES CAP!\n");
17852115 return 0;
....@@ -1798,10 +2128,9 @@
17982128 phy_power_off(rk_pcie->phy);
17992129 phy_exit(rk_pcie->phy);
18002130 deinit_clk:
1801
- rk_pcie_clk_deinit(rk_pcie);
2131
+ clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
18022132 disable_vpcie3v3:
18032133 rk_pcie_disable_power(rk_pcie);
1804
-
18052134 release_driver:
18062135 if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT))
18072136 device_release_driver(dev);
....@@ -1826,12 +2155,147 @@
18262155 return rk_pcie_really_probe(pdev);
18272156 }
18282157
2158
+#ifdef CONFIG_PCIEASPM
2159
+static void rk_pcie_downstream_dev_to_d0(struct rk_pcie *rk_pcie, bool enable)
2160
+{
2161
+ struct pcie_port *pp = &rk_pcie->pci->pp;
2162
+ struct pci_bus *child, *root_bus = NULL;
2163
+ struct pci_dev *pdev, *bridge;
2164
+ u32 val;
2165
+
2166
+ list_for_each_entry(child, &pp->bridge->bus->children, node) {
2167
+ /* Bring downstream devices to D3 if they are not already in */
2168
+ if (child->parent == pp->bridge->bus) {
2169
+ root_bus = child;
2170
+ bridge = root_bus->self;
2171
+ break;
2172
+ }
2173
+ }
2174
+
2175
+ if (!root_bus) {
2176
+ dev_err(rk_pcie->pci->dev, "Failed to find downstream devices\n");
2177
+ return;
2178
+ }
2179
+
2180
+ /* Save and restore root bus ASPM */
2181
+ if (enable) {
2182
+ if (rk_pcie->l1ss_ctl1)
2183
+ dw_pcie_writel_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1, rk_pcie->l1ss_ctl1);
2184
+
2185
+ /* rk_pcie->aspm woule be saved in advance when enable is false */
2186
+ dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, rk_pcie->aspm);
2187
+ } else {
2188
+ val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1);
2189
+ if (val & PCI_L1SS_CTL1_L1SS_MASK)
2190
+ rk_pcie->l1ss_ctl1 = val;
2191
+ else
2192
+ rk_pcie->l1ss_ctl1 = 0;
2193
+
2194
+ val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL);
2195
+ rk_pcie->aspm = val & PCI_EXP_LNKCTL_ASPMC;
2196
+ val &= ~(PCI_EXP_LNKCAP_ASPM_L1 | PCI_EXP_LNKCAP_ASPM_L0S);
2197
+ dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, val);
2198
+ }
2199
+
2200
+ list_for_each_entry(pdev, &root_bus->devices, bus_list) {
2201
+ if (PCI_SLOT(pdev->devfn) == 0) {
2202
+ if (pci_set_power_state(pdev, PCI_D0))
2203
+ dev_err(rk_pcie->pci->dev,
2204
+ "Failed to transition %s to D3hot state\n",
2205
+ dev_name(&pdev->dev));
2206
+ if (enable) {
2207
+ if (rk_pcie->l1ss_ctl1) {
2208
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, &val);
2209
+ val &= ~PCI_L1SS_CTL1_L1SS_MASK;
2210
+ val |= (rk_pcie->l1ss_ctl1 & PCI_L1SS_CTL1_L1SS_MASK);
2211
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, val);
2212
+ }
2213
+
2214
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
2215
+ PCI_EXP_LNKCTL_ASPMC, rk_pcie->aspm);
2216
+ } else {
2217
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
2218
+ }
2219
+ }
2220
+ }
2221
+}
2222
+#endif
2223
+
18292224 static int __maybe_unused rockchip_dw_pcie_suspend(struct device *dev)
18302225 {
18312226 struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
1832
- int ret;
2227
+ int ret = 0, power;
2228
+ struct dw_pcie *pci = rk_pcie->pci;
2229
+ u32 status;
2230
+
2231
+ /*
2232
+ * This is as per PCI Express Base r5.0 r1.0 May 22-2019,
2233
+ * 5.2 Link State Power Management (Page #440).
2234
+ *
2235
+ * L2/L3 Ready entry negotiations happen while in the L0 state.
2236
+ * L2/L3 Ready are entered only after the negotiation completes.
2237
+ *
2238
+ * The following example sequence illustrates the multi-step Link state
2239
+ * transition process leading up to entering a system sleep state:
2240
+ * 1. System software directs all Functions of a Downstream component to D3Hot.
2241
+ * 2. The Downstream component then initiates the transition of the Link to L1
2242
+ * as required.
2243
+ * 3. System software then causes the Root Complex to broadcast the PME_Turn_Off
2244
+ * Message in preparation for removing the main power source.
2245
+ * 4. This Message causes the subject Link to transition back to L0 in order to
2246
+ * send it and to enable the Downstream component to respond with PME_TO_Ack.
2247
+ * 5. After sending the PME_TO_Ack, the Downstream component initiates the L2/L3
2248
+ * Ready transition protocol.
2249
+ */
2250
+
2251
+ /* 1. All sub-devices are in D3hot by PCIe stack */
2252
+ dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
18332253
18342254 rk_pcie_link_status_clear(rk_pcie);
2255
+
2256
+ /*
2257
+ * Wlan devices will be shutdown from function driver now, so doing L2 here
2258
+ * must fail. Skip L2 routine.
2259
+ */
2260
+ if (rk_pcie->skip_scan_in_resume) {
2261
+ rfkill_get_wifi_power_state(&power);
2262
+ if (!power)
2263
+ goto no_l2;
2264
+ }
2265
+
2266
+ /* 2. Broadcast PME_Turn_Off Message */
2267
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_MSG_GEN, PME_TURN_OFF);
2268
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_MSG_GEN,
2269
+ status, !(status & BIT(4)), 20, RK_PCIE_L2_TMOUT_US);
2270
+ if (ret) {
2271
+ dev_err(dev, "Failed to send PME_Turn_Off\n");
2272
+ goto no_l2;
2273
+ }
2274
+
2275
+ /* 3. Wait for PME_TO_Ack */
2276
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_INTR_STATUS_MSG_RX,
2277
+ status, status & BIT(9), 20, RK_PCIE_L2_TMOUT_US);
2278
+ if (ret) {
2279
+ dev_err(dev, "Failed to receive PME_TO_Ack\n");
2280
+ goto no_l2;
2281
+ }
2282
+
2283
+ /* 4. Clear PME_TO_Ack and Wait for ready to enter L23 message */
2284
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MSG_RX, PME_TO_ACK);
2285
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_POWER,
2286
+ status, status & READY_ENTER_L23, 20, RK_PCIE_L2_TMOUT_US);
2287
+ if (ret) {
2288
+ dev_err(dev, "Failed to ready to enter L23\n");
2289
+ goto no_l2;
2290
+ }
2291
+
2292
+ /* 5. Check we are in L2 */
2293
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_LTSSM_STATUS,
2294
+ status, ((status & S_MAX) == S_L2_IDLE), 20, RK_PCIE_L2_TMOUT_US);
2295
+ if (ret)
2296
+ dev_err(pci->dev, "Link isn't in L2 idle!\n");
2297
+
2298
+no_l2:
18352299 rk_pcie_disable_ltssm(rk_pcie);
18362300
18372301 /* make sure assert phy success */
....@@ -1840,7 +2304,7 @@
18402304 phy_power_off(rk_pcie->phy);
18412305 phy_exit(rk_pcie->phy);
18422306
1843
- clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks);
2307
+ clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
18442308
18452309 rk_pcie->in_suspend = true;
18462310
....@@ -1856,20 +2320,25 @@
18562320 bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;
18572321 int ret;
18582322
2323
+ reset_control_assert(rk_pcie->rsts);
2324
+ udelay(10);
2325
+ reset_control_deassert(rk_pcie->rsts);
2326
+
18592327 ret = rk_pcie_enable_power(rk_pcie);
18602328 if (ret)
18612329 return ret;
18622330
1863
- ret = clk_bulk_enable(rk_pcie->clk_cnt, rk_pcie->clks);
2331
+ ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
18642332 if (ret) {
1865
- clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
2333
+ dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret);
18662334 return ret;
18672335 }
18682336
1869
- ret = phy_set_mode(rk_pcie->phy, rk_pcie->phy_mode);
2337
+ ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
2338
+ rk_pcie->phy_sub_mode);
18702339 if (ret) {
18712340 dev_err(dev, "fail to set phy to mode %s, err %d\n",
1872
- (rk_pcie->phy_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
2341
+ (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
18732342 ret);
18742343 return ret;
18752344 }
....@@ -1937,7 +2406,33 @@
19372406 return ret;
19382407 }
19392408
2409
+#ifdef CONFIG_PCIEASPM
2410
+static int rockchip_dw_pcie_prepare(struct device *dev)
2411
+{
2412
+ struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
2413
+
2414
+ dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
2415
+ rk_pcie_downstream_dev_to_d0(rk_pcie, false);
2416
+ dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
2417
+
2418
+ return 0;
2419
+}
2420
+
2421
+static void rockchip_dw_pcie_complete(struct device *dev)
2422
+{
2423
+ struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
2424
+
2425
+ dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
2426
+ rk_pcie_downstream_dev_to_d0(rk_pcie, true);
2427
+ dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
2428
+}
2429
+#endif
2430
+
/*
 * PM callbacks: the noirq suspend/resume pair handles the controller power
 * sequence itself; ->prepare/->complete (compiled in only with
 * CONFIG_PCIEASPM) save and restore the link's ASPM state around the
 * suspend cycle.
 */
static const struct dev_pm_ops rockchip_dw_pcie_pm_ops = {
#ifdef CONFIG_PCIEASPM
	.prepare = rockchip_dw_pcie_prepare,
	.complete = rockchip_dw_pcie_complete,
#endif
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_dw_pcie_suspend,
				      rockchip_dw_pcie_resume)
};
....@@ -1947,10 +2442,12 @@
19472442 .name = "rk-pcie",
19482443 .of_match_table = rk_pcie_of_match,
19492444 .suppress_bind_attrs = true,
2445
+ .pm = &rockchip_dw_pcie_pm_ops,
19502446 },
2447
+ .probe = rk_pcie_probe,
19512448 };
19522449
1953
-module_platform_driver_probe(rk_plat_pcie_driver, rk_pcie_probe);
2450
+module_platform_driver(rk_plat_pcie_driver);
19542451
19552452 MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com>");
19562453 MODULE_DESCRIPTION("RockChip PCIe Controller driver");