forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 072de836f53be56a70cecf70b43ae43b7ce17376
kernel/drivers/pci/controller/dwc/pcie-dw-rockchip.c
....@@ -14,6 +14,7 @@
1414 #include <linux/gpio.h>
1515 #include <linux/init.h>
1616 #include <linux/interrupt.h>
17
+#include <linux/iopoll.h>
1718 #include <linux/irq.h>
1819 #include <linux/irqchip/chained_irq.h>
1920 #include <linux/irqdomain.h>
....@@ -29,11 +30,13 @@
2930 #include <linux/of_pci.h>
3031 #include <linux/pci.h>
3132 #include <linux/phy/phy.h>
33
+#include <linux/phy/pcie.h>
3234 #include <linux/platform_device.h>
3335 #include <linux/poll.h>
3436 #include <linux/regmap.h>
3537 #include <linux/reset.h>
3638 #include <linux/resource.h>
39
+#include <linux/rfkill-wlan.h>
3740 #include <linux/signal.h>
3841 #include <linux/types.h>
3942 #include <linux/uaccess.h>
....@@ -49,13 +52,11 @@
4952 RK_PCIE_RC_TYPE,
5053 };
5154
52
-struct reset_bulk_data {
53
- const char *id;
54
- struct reset_control *rst;
55
-};
55
+#define RK_PCIE_DBG 0
5656
5757 #define PCIE_DMA_OFFSET 0x380000
5858
59
+#define PCIE_DMA_CTRL_OFF 0x8
5960 #define PCIE_DMA_WR_ENB 0xc
6061 #define PCIE_DMA_WR_CTRL_LO 0x200
6162 #define PCIE_DMA_WR_CTRL_HI 0x204
....@@ -99,6 +100,8 @@
99100
100101 #define PCIE_CAP_LINK_CONTROL2_LINK_STATUS 0xa0
101102
103
+#define PCIE_CLIENT_INTR_STATUS_MSG_RX 0x04
104
+#define PME_TO_ACK (BIT(9) | BIT(25))
102105 #define PCIE_CLIENT_INTR_STATUS_LEGACY 0x08
103106 #define PCIE_CLIENT_INTR_STATUS_MISC 0x10
104107 #define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
....@@ -106,12 +109,21 @@
106109 #define MASK_LEGACY_INT(x) (0x00110011 << x)
107110 #define UNMASK_LEGACY_INT(x) (0x00110000 << x)
108111 #define PCIE_CLIENT_INTR_MASK 0x24
112
+#define PCIE_CLIENT_POWER 0x2c
113
+#define READY_ENTER_L23 BIT(3)
114
+#define PCIE_CLIENT_MSG_GEN 0x34
115
+#define PME_TURN_OFF (BIT(4) | BIT(20))
109116 #define PCIE_CLIENT_GENERAL_DEBUG 0x104
110117 #define PCIE_CLIENT_HOT_RESET_CTRL 0x180
118
+#define PCIE_LTSSM_APP_DLY1_EN BIT(0)
119
+#define PCIE_LTSSM_APP_DLY2_EN BIT(1)
120
+#define PCIE_LTSSM_APP_DLY1_DONE BIT(2)
121
+#define PCIE_LTSSM_APP_DLY2_DONE BIT(3)
111122 #define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
112123 #define PCIE_CLIENT_LTSSM_STATUS 0x300
113124 #define SMLH_LINKUP BIT(16)
114125 #define RDLH_LINKUP BIT(17)
126
+#define PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN 0x154
115127 #define PCIE_CLIENT_DBG_FIFO_MODE_CON 0x310
116128 #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0 0x320
117129 #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1 0x324
....@@ -119,21 +131,31 @@
119131 #define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1 0x32c
120132 #define PCIE_CLIENT_DBG_FIFO_STATUS 0x350
121133 #define PCIE_CLIENT_DBG_TRANSITION_DATA 0xffff0000
122
-#define PCIE_CLIENT_DBF_EN 0xffff0003
123
-#define RK_PCIE_DBG 0
134
+#define PCIE_CLIENT_DBF_EN 0xffff0007
124135
125136 #define PCIE_PHY_LINKUP BIT(0)
126137 #define PCIE_DATA_LINKUP BIT(1)
127138
128
-#define PCIE_RESBAR_CTRL_REG0_REG 0x2a8
139
+#define PCIE_TYPE0_HDR_DBI2_OFFSET 0x100000
129140 #define PCIE_SB_BAR0_MASK_REG 0x100010
130141
131142 #define PCIE_PL_ORDER_RULE_CTRL_OFF 0x8B4
143
+#define RK_PCIE_L2_TMOUT_US 5000
144
+#define RK_PCIE_HOTRESET_TMOUT_US 10000
145
+
146
+enum rk_pcie_ltssm_code {
147
+ S_L0 = 0x11,
148
+ S_L0S = 0x12,
149
+ S_L1_IDLE = 0x14,
150
+ S_L2_IDLE = 0x15,
151
+ S_MAX = 0x1f,
152
+};
132153
133154 struct rk_pcie {
134155 struct dw_pcie *pci;
135156 enum rk_pcie_device_mode mode;
136157 enum phy_mode phy_mode;
158
+ int phy_sub_mode;
137159 unsigned char bar_to_atu[6];
138160 phys_addr_t *outbound_addr;
139161 unsigned long *ib_window_map;
....@@ -144,9 +166,11 @@
144166 void __iomem *apb_base;
145167 struct phy *phy;
146168 struct clk_bulk_data *clks;
169
+ struct reset_control *rsts;
147170 unsigned int clk_cnt;
148
- struct reset_bulk_data *rsts;
149171 struct gpio_desc *rst_gpio;
172
+ u32 perst_inactive_ms;
173
+ struct gpio_desc *prsnt_gpio;
150174 phys_addr_t mem_start;
151175 size_t mem_size;
152176 struct pcie_port pp;
....@@ -154,21 +178,30 @@
154178 struct regmap *pmu_grf;
155179 struct dma_trx_obj *dma_obj;
156180 bool in_suspend;
181
+ bool skip_scan_in_resume;
157182 bool is_rk1808;
158183 bool is_signal_test;
159184 bool bifurcation;
185
+ bool supports_clkreq;
160186 struct regulator *vpcie3v3;
161187 struct irq_domain *irq_domain;
162188 raw_spinlock_t intx_lock;
189
+ u16 aspm;
190
+ u32 l1ss_ctl1;
163191 struct dentry *debugfs;
192
+ u32 msi_vector_num;
193
+ struct workqueue_struct *hot_rst_wq;
194
+ struct work_struct hot_rst_work;
195
+ u32 comp_prst[2];
196
+ u32 intx;
164197 };
165198
166199 struct rk_pcie_of_data {
167200 enum rk_pcie_device_mode mode;
201
+ u32 msi_vector_num;
168202 };
169203
170204 #define to_rk_pcie(x) dev_get_drvdata((x)->dev)
171
-static const struct dev_pm_ops rockchip_dw_pcie_pm_ops;
172205
173206 static int rk_pcie_read(void __iomem *addr, int size, u32 *val)
174207 {
....@@ -253,12 +286,154 @@
253286 return 0;
254287 }
255288
289
+static void rk_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
290
+{
291
+ int ret;
292
+
293
+ if (pci->ops->write_dbi) {
294
+ pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
295
+ return;
296
+ }
297
+
298
+ ret = dw_pcie_write(pci->atu_base + reg, 4, val);
299
+ if (ret)
300
+ dev_err(pci->dev, "Write ATU address failed\n");
301
+}
302
+
303
+static void rk_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
304
+ u32 val)
305
+{
306
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
307
+
308
+ rk_pcie_writel_atu(pci, offset + reg, val);
309
+}
310
+
311
+static u32 rk_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
312
+{
313
+ int ret;
314
+ u32 val;
315
+
316
+ if (pci->ops->read_dbi)
317
+ return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
318
+
319
+ ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
320
+ if (ret)
321
+ dev_err(pci->dev, "Read ATU address failed\n");
322
+
323
+ return val;
324
+}
325
+
326
+static u32 rk_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
327
+{
328
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
329
+
330
+ return rk_pcie_readl_atu(pci, offset + reg);
331
+}
332
+
333
+static int rk_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
334
+ int index, int bar, u64 cpu_addr,
335
+ enum dw_pcie_as_type as_type)
336
+{
337
+ int type;
338
+ u32 retries, val;
339
+
340
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
341
+ lower_32_bits(cpu_addr));
342
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
343
+ upper_32_bits(cpu_addr));
344
+
345
+ switch (as_type) {
346
+ case DW_PCIE_AS_MEM:
347
+ type = PCIE_ATU_TYPE_MEM;
348
+ break;
349
+ case DW_PCIE_AS_IO:
350
+ type = PCIE_ATU_TYPE_IO;
351
+ break;
352
+ default:
353
+ return -EINVAL;
354
+ }
355
+
356
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
357
+ PCIE_ATU_FUNC_NUM(func_no));
358
+ rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
359
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
360
+ PCIE_ATU_ENABLE |
361
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
362
+
363
+ /*
364
+ * Make sure ATU enable takes effect before any subsequent config
365
+ * and I/O accesses.
366
+ */
367
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
368
+ val = rk_pcie_readl_ib_unroll(pci, index,
369
+ PCIE_ATU_UNR_REGION_CTRL2);
370
+ if (val & PCIE_ATU_ENABLE)
371
+ return 0;
372
+
373
+ mdelay(LINK_WAIT_IATU);
374
+ }
375
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
376
+
377
+ return -EBUSY;
378
+}
379
+
380
+
381
+static int rk_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
382
+ int bar, u64 cpu_addr,
383
+ enum dw_pcie_as_type as_type)
384
+{
385
+ int type;
386
+ u32 retries, val;
387
+
388
+ if (pci->iatu_unroll_enabled)
389
+ return rk_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
390
+ cpu_addr, as_type);
391
+
392
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
393
+ index);
394
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
395
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
396
+
397
+ switch (as_type) {
398
+ case DW_PCIE_AS_MEM:
399
+ type = PCIE_ATU_TYPE_MEM;
400
+ break;
401
+ case DW_PCIE_AS_IO:
402
+ type = PCIE_ATU_TYPE_IO;
403
+ break;
404
+ default:
405
+ return -EINVAL;
406
+ }
407
+
408
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
409
+ PCIE_ATU_FUNC_NUM(func_no));
410
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
411
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
412
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
413
+
414
+ /*
415
+ * Make sure ATU enable takes effect before any subsequent config
416
+ * and I/O accesses.
417
+ */
418
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
419
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
420
+ if (val & PCIE_ATU_ENABLE)
421
+ return 0;
422
+
423
+ mdelay(LINK_WAIT_IATU);
424
+ }
425
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
426
+
427
+ return -EBUSY;
428
+}
429
+
256430 static int rk_pcie_ep_inbound_atu(struct rk_pcie *rk_pcie,
257431 enum pci_barno bar, dma_addr_t cpu_addr,
258432 enum dw_pcie_as_type as_type)
259433 {
260434 int ret;
261435 u32 free_win;
436
+ u8 func_no = 0x0;
262437
263438 if (rk_pcie->in_suspend) {
264439 free_win = rk_pcie->bar_to_atu[bar];
....@@ -271,8 +446,8 @@
271446 }
272447 }
273448
274
- ret = dw_pcie_prog_inbound_atu(rk_pcie->pci, free_win, bar, cpu_addr,
275
- as_type);
449
+ ret = rk_pcie_prog_inbound_atu(rk_pcie->pci, func_no, free_win, bar,
450
+ cpu_addr, as_type);
276451 if (ret < 0) {
277452 dev_err(rk_pcie->pci->dev, "Failed to program IB window\n");
278453 return ret;
....@@ -285,6 +460,105 @@
285460 set_bit(free_win, rk_pcie->ib_window_map);
286461
287462 return 0;
463
+}
464
+
465
+static void rk_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
466
+ u32 val)
467
+{
468
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
469
+
470
+ rk_pcie_writel_atu(pci, offset + reg, val);
471
+}
472
+
473
+static u32 rk_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
474
+{
475
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
476
+
477
+ return rk_pcie_readl_atu(pci, offset + reg);
478
+}
479
+
480
+static void rk_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
481
+ int index, int type,
482
+ u64 cpu_addr, u64 pci_addr,
483
+ u32 size)
484
+{
485
+ u32 retries, val;
486
+ u64 limit_addr = cpu_addr + size - 1;
487
+
488
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
489
+ lower_32_bits(cpu_addr));
490
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
491
+ upper_32_bits(cpu_addr));
492
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
493
+ lower_32_bits(limit_addr));
494
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
495
+ upper_32_bits(limit_addr));
496
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
497
+ lower_32_bits(pci_addr));
498
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
499
+ upper_32_bits(pci_addr));
500
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
501
+ type | PCIE_ATU_FUNC_NUM(func_no));
502
+ rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
503
+ PCIE_ATU_ENABLE);
504
+
505
+ /*
506
+ * Make sure ATU enable takes effect before any subsequent config
507
+ * and I/O accesses.
508
+ */
509
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
510
+ val = rk_pcie_readl_ob_unroll(pci, index,
511
+ PCIE_ATU_UNR_REGION_CTRL2);
512
+ if (val & PCIE_ATU_ENABLE)
513
+ return;
514
+
515
+ mdelay(LINK_WAIT_IATU);
516
+ }
517
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
518
+}
519
+
520
+static void rk_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
521
+ int type, u64 cpu_addr, u64 pci_addr, u32 size)
522
+{
523
+ u32 retries, val;
524
+
525
+ if (pci->ops->cpu_addr_fixup)
526
+ cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
527
+
528
+ if (pci->iatu_unroll_enabled) {
529
+ rk_pcie_prog_outbound_atu_unroll(pci, 0x0, index, type,
530
+ cpu_addr, pci_addr, size);
531
+ return;
532
+ }
533
+
534
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
535
+ PCIE_ATU_REGION_OUTBOUND | index);
536
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
537
+ lower_32_bits(cpu_addr));
538
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
539
+ upper_32_bits(cpu_addr));
540
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
541
+ lower_32_bits(cpu_addr + size - 1));
542
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
543
+ lower_32_bits(pci_addr));
544
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
545
+ upper_32_bits(pci_addr));
546
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
547
+ PCIE_ATU_FUNC_NUM(0x0));
548
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
549
+
550
+ /*
551
+ * Make sure ATU enable takes effect before any subsequent config
552
+ * and I/O accesses.
553
+ */
554
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
555
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
556
+ if (val & PCIE_ATU_ENABLE)
557
+ return;
558
+
559
+ mdelay(LINK_WAIT_IATU);
560
+ }
561
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
288562 }
289563
290564 static int rk_pcie_ep_outbound_atu(struct rk_pcie *rk_pcie,
....@@ -305,7 +579,7 @@
305579 }
306580 }
307581
308
- dw_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM,
582
+ rk_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM,
309583 phys_addr, pci_addr, size);
310584
311585 if (rk_pcie->in_suspend)
....@@ -362,6 +636,28 @@
362636 return 0;
363637 }
364638
639
+#if defined(CONFIG_PCIEASPM)
640
+static void disable_aspm_l1ss(struct rk_pcie *rk_pcie)
641
+{
642
+ u32 val, cfg_link_cap_l1sub;
643
+
644
+ val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_L1SS);
645
+ if (!val) {
646
+ dev_err(rk_pcie->pci->dev, "can't find l1ss cap\n");
647
+
648
+ return;
649
+ }
650
+
651
+ cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
652
+
653
+ val = dw_pcie_readl_dbi(rk_pcie->pci, cfg_link_cap_l1sub);
654
+ val &= ~(PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2 | PCI_L1SS_CAP_L1_PM_SS);
655
+ dw_pcie_writel_dbi(rk_pcie->pci, cfg_link_cap_l1sub, val);
656
+}
657
+#else
658
+static inline void disable_aspm_l1ss(struct rk_pcie *rk_pcie) { return; }
659
+#endif
660
+
365661 static inline void rk_pcie_set_mode(struct rk_pcie *rk_pcie)
366662 {
367663 switch (rk_pcie->mode) {
....@@ -369,6 +665,14 @@
369665 rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00000);
370666 break;
371667 case RK_PCIE_RC_TYPE:
668
+ if (rk_pcie->supports_clkreq) {
669
+ /* Application is ready to have reference clock removed */
670
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x00010001);
671
+ } else {
672
+ /* Pull down CLKREQ# to assert the connecting CLOCK_GEN OE */
673
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x30011000);
674
+ disable_aspm_l1ss(rk_pcie);
675
+ }
372676 rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00040);
373677 /*
374678 * Disable order rule for CPL can't pass halted P queue.
....@@ -405,13 +709,11 @@
405709
406710 if (rk_pcie->is_rk1808) {
407711 val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG);
408
- if ((val & (PCIE_PHY_LINKUP | PCIE_DATA_LINKUP)) == 0x3 &&
409
- ((val & GENMASK(15, 10)) >> 10) == 0x11)
712
+ if ((val & (PCIE_PHY_LINKUP | PCIE_DATA_LINKUP)) == 0x3)
410713 return 1;
411714 } else {
412715 val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS);
413
- if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == 0x30000 &&
414
- (val & GENMASK(5, 0)) == 0x11)
716
+ if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == 0x30000)
415717 return 1;
416718 }
417719
....@@ -420,7 +722,8 @@
420722
421723 static void rk_pcie_enable_debug(struct rk_pcie *rk_pcie)
422724 {
423
-#if RK_PCIE_DBG
725
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
726
+ return;
424727 if (rk_pcie->is_rk1808 == true)
425728 return;
426729
....@@ -434,7 +737,6 @@
434737 PCIE_CLIENT_DBG_TRANSITION_DATA);
435738 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_MODE_CON,
436739 PCIE_CLIENT_DBF_EN);
437
-#endif
438740 }
439741
440742 static void rk_pcie_debug_dump(struct rk_pcie *rk_pcie)
....@@ -453,13 +755,22 @@
453755
454756 static int rk_pcie_establish_link(struct dw_pcie *pci)
455757 {
456
- int retries;
758
+ int retries, power;
457759 struct rk_pcie *rk_pcie = to_rk_pcie(pci);
760
+ bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;
458761
459
- if (dw_pcie_link_up(pci)) {
762
+ /*
763
+ * For standard RC, even if the link has been setup by firmware,
764
+ * we still need to reset link as we need to remove all resource info
765
+ * from devices, for instance BAR, as it wasn't assigned by kernel.
766
+ */
767
+ if (dw_pcie_link_up(pci) && !std_rc) {
460768 dev_err(pci->dev, "link is already up\n");
461769 return 0;
462770 }
771
+
772
+ /* Rest the device */
773
+ gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);
463774
464775 rk_pcie_disable_ltssm(rk_pcie);
465776 rk_pcie_link_status_clear(rk_pcie);
....@@ -472,16 +783,38 @@
472783 rk_pcie_enable_ltssm(rk_pcie);
473784
474785 /*
786
+ * In resume routine, function devices' resume function must be late after
787
+ * controllers'. Some devices, such as Wi-Fi, need special IO setting before
788
+ * finishing training. So there must be timeout here. These kinds of devices
789
+ * need rescan devices by its driver when used. So no need to waste time waiting
790
+ * for training pass.
791
+ */
792
+ if (rk_pcie->in_suspend && rk_pcie->skip_scan_in_resume) {
793
+ rfkill_get_wifi_power_state(&power);
794
+ if (!power) {
795
+ gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
796
+ return 0;
797
+ }
798
+ }
799
+
800
+ /*
475801 * PCIe requires the refclk to be stable for 100µs prior to releasing
476802 * PERST and T_PVPERL (Power stable to PERST# inactive) should be a
477803 * minimum of 100ms. See table 2-4 in section 2.6.2 AC, the PCI Express
478804 * Card Electromechanical Specification 3.0. So 100ms in total is the min
479
- * requuirement here. We add a 1s for sake of hoping everthings work fine.
805
+ * requuirement here. We add a 200ms by default for sake of hoping everthings
806
+ * work fine. If it doesn't, please add more in DT node by add rockchip,perst-inactive-ms.
480807 */
481
- msleep(1000);
808
+ msleep(rk_pcie->perst_inactive_ms);
482809 gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
483810
484
- for (retries = 0; retries < 10; retries++) {
811
+ /*
812
+ * Add this 1ms delay because we observe link is always up stably after it and
813
+ * could help us save 20ms for scanning devices.
814
+ */
815
+ usleep_range(1000, 1100);
816
+
817
+ for (retries = 0; retries < 100; retries++) {
485818 if (dw_pcie_link_up(pci)) {
486819 /*
487820 * We may be here in case of L0 in Gen1. But if EP is capable
....@@ -490,17 +823,20 @@
490823 * that LTSSM max timeout is 24ms per period, we can wait a bit
491824 * more for Gen switch.
492825 */
493
- msleep(100);
494
- dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n",
495
- rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
496
- rk_pcie_debug_dump(rk_pcie);
497
- return 0;
826
+ msleep(50);
827
+ /* In case link drop after linkup, double check it */
828
+ if (dw_pcie_link_up(pci)) {
829
+ dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n",
830
+ rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
831
+ rk_pcie_debug_dump(rk_pcie);
832
+ return 0;
833
+ }
498834 }
499835
500836 dev_info_ratelimited(pci->dev, "PCIe Linking... LTSSM is 0x%x\n",
501837 rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
502838 rk_pcie_debug_dump(rk_pcie);
503
- msleep(1000);
839
+ msleep(20);
504840 }
505841
506842 dev_err(pci->dev, "PCIe Link Fail\n");
....@@ -508,20 +844,31 @@
508844 return rk_pcie->is_signal_test == true ? 0 : -EINVAL;
509845 }
510846
847
+static bool rk_pcie_udma_enabled(struct rk_pcie *rk_pcie)
848
+{
849
+ return dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
850
+ PCIE_DMA_CTRL_OFF);
851
+}
852
+
511853 static int rk_pcie_init_dma_trx(struct rk_pcie *rk_pcie)
512854 {
855
+ if (!rk_pcie_udma_enabled(rk_pcie))
856
+ return 0;
857
+
513858 rk_pcie->dma_obj = rk_pcie_dma_obj_probe(rk_pcie->pci->dev);
514859 if (IS_ERR(rk_pcie->dma_obj)) {
515860 dev_err(rk_pcie->pci->dev, "failed to prepare dma object\n");
516861 return -EINVAL;
862
+ } else if (rk_pcie->dma_obj) {
863
+ goto out;
517864 }
518865
519
- rk_pcie->dma_obj = pcie_dw_dmatest_register(rk_pcie->pci, true);
866
+ rk_pcie->dma_obj = pcie_dw_dmatest_register(rk_pcie->pci->dev, true);
520867 if (IS_ERR(rk_pcie->dma_obj)) {
521868 dev_err(rk_pcie->pci->dev, "failed to prepare dmatest\n");
522869 return -EINVAL;
523870 }
524
-
871
+out:
525872 /* Enable client write and read interrupt */
526873 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);
527874
....@@ -534,6 +881,76 @@
534881 return 0;
535882 }
536883
884
+static int rk_pci_find_resbar_capability(struct rk_pcie *rk_pcie)
885
+{
886
+ u32 header;
887
+ int ttl;
888
+ int start = 0;
889
+ int pos = PCI_CFG_SPACE_SIZE;
890
+ int cap = PCI_EXT_CAP_ID_REBAR;
891
+
892
+ /* minimum 8 bytes per capability */
893
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
894
+
895
+ header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
896
+
897
+ /*
898
+ * If we have no capabilities, this is indicated by cap ID,
899
+ * cap version and next pointer all being 0.
900
+ */
901
+ if (header == 0)
902
+ return 0;
903
+
904
+ while (ttl-- > 0) {
905
+ if (PCI_EXT_CAP_ID(header) == cap && pos != start)
906
+ return pos;
907
+
908
+ pos = PCI_EXT_CAP_NEXT(header);
909
+ if (pos < PCI_CFG_SPACE_SIZE)
910
+ break;
911
+
912
+ header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
913
+ if (!header)
914
+ break;
915
+ }
916
+
917
+ return 0;
918
+}
919
+
920
+#ifdef MODULE
921
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
922
+{
923
+ int ret;
924
+
925
+ if (pci->ops && pci->ops->write_dbi2) {
926
+ pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
927
+ return;
928
+ }
929
+
930
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
931
+ if (ret)
932
+ dev_err(pci->dev, "write DBI address failed\n");
933
+}
934
+#endif
935
+
936
+static int rk_pcie_ep_set_bar_flag(struct rk_pcie *rk_pcie, enum pci_barno barno, int flags)
937
+{
938
+ enum pci_barno bar = barno;
939
+ u32 reg;
940
+
941
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
942
+
943
+ /* Disabled the upper 32bits BAR to make a 64bits bar pair */
944
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
945
+ dw_pcie_writel_dbi2(rk_pcie->pci, reg + 4, 0);
946
+
947
+ dw_pcie_writel_dbi(rk_pcie->pci, reg, flags);
948
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
949
+ dw_pcie_writel_dbi(rk_pcie->pci, reg + 4, 0);
950
+
951
+ return 0;
952
+}
953
+
537954 static void rk_pcie_ep_setup(struct rk_pcie *rk_pcie)
538955 {
539956 int ret;
....@@ -541,6 +958,8 @@
541958 u32 lanes;
542959 struct device *dev = rk_pcie->pci->dev;
543960 struct device_node *np = dev->of_node;
961
+ int resbar_base;
962
+ int bar;
544963
545964 /* Enable client write and read interrupt */
546965 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);
....@@ -604,17 +1023,36 @@
6041023 /* Enable bus master and memory space */
6051024 dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_STATUS_COMMAND_REG, 0x6);
6061025
607
- /* Resize BAR0 to 4GB */
608
- /* bit13-8 set to 6 means 64MB */
609
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_RESBAR_CTRL_REG0_REG, 0x600);
1026
+ resbar_base = rk_pci_find_resbar_capability(rk_pcie);
1027
+ if (!resbar_base) {
1028
+ dev_warn(dev, "failed to find resbar_base\n");
1029
+ } else {
1030
+ /* Resize BAR0 to support 512GB, BAR1 to support 8M, BAR2~5 to support 64M */
1031
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4, 0xfffff0);
1032
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8, 0x13c0);
1033
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0xc, 0xfffff0);
1034
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x10, 0x3c0);
1035
+ for (bar = 2; bar < 6; bar++) {
1036
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
1037
+ dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8 + bar * 0x8, 0x6c0);
1038
+ }
6101039
611
- /* Set shadow BAR0 according 64MB */
612
- val = rk_pcie->mem_size - 1;
613
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val);
1040
+ /* Set flags */
1041
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_0, PCI_BASE_ADDRESS_MEM_TYPE_32);
1042
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_1, PCI_BASE_ADDRESS_MEM_TYPE_32);
1043
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_2, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
1044
+ rk_pcie_ep_set_bar_flag(rk_pcie, BAR_4, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
1045
+ }
6141046
615
- /* Set reserved memory address to BAR0 */
616
- dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_BAR0_REG,
617
- rk_pcie->mem_start);
1047
+ /* Device id and class id needed for request bar address */
1048
+ dw_pcie_writew_dbi(rk_pcie->pci, PCI_DEVICE_ID, 0x356a);
1049
+ dw_pcie_writew_dbi(rk_pcie->pci, PCI_CLASS_DEVICE, 0x0580);
1050
+
1051
+ /* Set shadow BAR0 */
1052
+ if (rk_pcie->is_rk1808) {
1053
+ val = rk_pcie->mem_size - 1;
1054
+ dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val);
1055
+ }
6181056 }
6191057
6201058 static int rk_pcie_ep_win_parse(struct rk_pcie *rk_pcie)
....@@ -675,12 +1113,24 @@
6751113 return 0;
6761114 }
6771115
1116
+static void rk_pcie_msi_set_num_vectors(struct pcie_port *pp)
1117
+{
1118
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1119
+ struct rk_pcie *rk_pcie = to_rk_pcie(pci);
1120
+
1121
+ pp->num_vectors = rk_pcie->msi_vector_num;
1122
+}
1123
+
6781124 static int rk_pcie_host_init(struct pcie_port *pp)
6791125 {
6801126 int ret;
6811127 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
6821128
6831129 dw_pcie_setup_rc(pp);
1130
+
1131
+ /* Disable BAR0 BAR1 */
1132
+ dw_pcie_writel_dbi(pci, PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + BAR_0 * 4, 0);
1133
+ dw_pcie_writel_dbi(pci, PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + BAR_1 * 4, 0);
6841134
6851135 ret = rk_pcie_establish_link(pci);
6861136
....@@ -707,13 +1157,13 @@
7071157 if (pp->msi_irq < 0) {
7081158 dev_info(dev, "use outband MSI support");
7091159 rk_pcie_host_ops.msi_host_init = rk_pcie_msi_host_init;
1160
+ } else {
1161
+ dev_info(dev, "max MSI vector is %d\n", rk_pcie->msi_vector_num);
1162
+ rk_pcie_host_ops.set_num_vectors = rk_pcie_msi_set_num_vectors;
7101163 }
7111164 }
7121165
7131166 pp->ops = &rk_pcie_host_ops;
714
-
715
- if (device_property_read_bool(dev, "msi-map"))
716
- pp->msi_ext = 1;
7171167
7181168 ret = dw_pcie_host_init(pp);
7191169 if (ret) {
....@@ -753,6 +1203,8 @@
7531203 return ret;
7541204 }
7551205
1206
+ rk_pcie->pci->dbi_base2 = rk_pcie->pci->dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET;
1207
+ rk_pcie->pci->atu_base = rk_pcie->pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
7561208 rk_pcie->pci->iatu_unroll_enabled = rk_pcie_iatu_unroll_enabled(rk_pcie->pci);
7571209
7581210 ret = rk_pcie_ep_atu_init(rk_pcie);
....@@ -769,52 +1221,24 @@
7691221 return ret;
7701222 }
7711223
772
- return 0;
773
-}
1224
+ if (!rk_pcie_udma_enabled(rk_pcie))
1225
+ return 0;
7741226
775
-static void rk_pcie_clk_deinit(struct rk_pcie *rk_pcie)
776
-{
777
- clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks);
778
- clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
1227
+ return 0;
7791228 }
7801229
7811230 static int rk_pcie_clk_init(struct rk_pcie *rk_pcie)
7821231 {
7831232 struct device *dev = rk_pcie->pci->dev;
784
- struct property *prop;
785
- const char *name;
786
- int i = 0, ret, count;
1233
+ int ret;
7871234
788
- count = of_property_count_strings(dev->of_node, "clock-names");
789
- if (count < 1)
1235
+ rk_pcie->clk_cnt = devm_clk_bulk_get_all(dev, &rk_pcie->clks);
1236
+ if (rk_pcie->clk_cnt < 1)
7901237 return -ENODEV;
7911238
792
- rk_pcie->clks = devm_kcalloc(dev, count,
793
- sizeof(struct clk_bulk_data),
794
- GFP_KERNEL);
795
- if (!rk_pcie->clks)
796
- return -ENOMEM;
797
-
798
- rk_pcie->clk_cnt = count;
799
-
800
- of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
801
- rk_pcie->clks[i].id = name;
802
- if (!rk_pcie->clks[i].id)
803
- return -ENOMEM;
804
- i++;
805
- }
806
-
807
- ret = devm_clk_bulk_get(dev, count, rk_pcie->clks);
808
- if (ret)
809
- return ret;
810
-
811
- ret = clk_bulk_prepare(count, rk_pcie->clks);
812
- if (ret)
813
- return ret;
814
-
815
- ret = clk_bulk_enable(count, rk_pcie->clks);
1239
+ ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
8161240 if (ret) {
817
- clk_bulk_unprepare(count, rk_pcie->clks);
1241
+ dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret);
8181242 return ret;
8191243 }
8201244
....@@ -865,6 +1289,14 @@
8651289 return PTR_ERR(rk_pcie->rst_gpio);
8661290 }
8671291
1292
+ if (device_property_read_u32(&pdev->dev, "rockchip,perst-inactive-ms",
1293
+ &rk_pcie->perst_inactive_ms))
1294
+ rk_pcie->perst_inactive_ms = 200;
1295
+
1296
+ rk_pcie->prsnt_gpio = devm_gpiod_get_optional(&pdev->dev, "prsnt", GPIOD_IN);
1297
+ if (IS_ERR_OR_NULL(rk_pcie->prsnt_gpio))
1298
+ dev_info(&pdev->dev, "invalid prsnt-gpios property in node\n");
1299
+
8681300 return 0;
8691301 }
8701302
....@@ -873,7 +1305,7 @@
8731305 int ret;
8741306 struct device *dev = rk_pcie->pci->dev;
8751307
876
- rk_pcie->phy = devm_phy_get(dev, "pcie-phy");
1308
+ rk_pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
8771309 if (IS_ERR(rk_pcie->phy)) {
8781310 if (PTR_ERR(rk_pcie->phy) != -EPROBE_DEFER)
8791311 dev_info(dev, "missing phy\n");
....@@ -882,23 +1314,27 @@
8821314
8831315 switch (rk_pcie->mode) {
8841316 case RK_PCIE_RC_TYPE:
885
- rk_pcie->phy_mode = PHY_MODE_PCIE_RC;
1317
+ rk_pcie->phy_mode = PHY_MODE_PCIE; /* make no sense */
1318
+ rk_pcie->phy_sub_mode = PHY_MODE_PCIE_RC;
8861319 break;
8871320 case RK_PCIE_EP_TYPE:
888
- rk_pcie->phy_mode = PHY_MODE_PCIE_EP;
1321
+ rk_pcie->phy_mode = PHY_MODE_PCIE;
1322
+ rk_pcie->phy_sub_mode = PHY_MODE_PCIE_EP;
8891323 break;
8901324 }
8911325
892
- ret = phy_set_mode(rk_pcie->phy, rk_pcie->phy_mode);
1326
+ ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
1327
+ rk_pcie->phy_sub_mode);
8931328 if (ret) {
8941329 dev_err(dev, "fail to set phy to mode %s, err %d\n",
895
- (rk_pcie->phy_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
1330
+ (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
8961331 ret);
8971332 return ret;
8981333 }
8991334
9001335 if (rk_pcie->bifurcation)
901
- ret = phy_set_mode(rk_pcie->phy, PHY_MODE_PCIE_BIFURCATION);
1336
+ phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
1337
+ PHY_MODE_PCIE_BIFURCATION);
9021338
9031339 ret = phy_init(rk_pcie->phy);
9041340 if (ret < 0) {
....@@ -907,53 +1343,6 @@
9071343 }
9081344
9091345 phy_power_on(rk_pcie->phy);
910
-
911
- return 0;
912
-}
913
-
914
-static int rk_pcie_reset_control_release(struct rk_pcie *rk_pcie)
915
-{
916
- struct device *dev = rk_pcie->pci->dev;
917
- struct property *prop;
918
- const char *name;
919
- int ret, count, i = 0;
920
-
921
- count = of_property_count_strings(dev->of_node, "reset-names");
922
- if (count < 1)
923
- return -ENODEV;
924
-
925
- rk_pcie->rsts = devm_kcalloc(dev, count,
926
- sizeof(struct reset_bulk_data),
927
- GFP_KERNEL);
928
- if (!rk_pcie->rsts)
929
- return -ENOMEM;
930
-
931
- of_property_for_each_string(dev->of_node, "reset-names",
932
- prop, name) {
933
- rk_pcie->rsts[i].id = name;
934
- if (!rk_pcie->rsts[i].id)
935
- return -ENOMEM;
936
- i++;
937
- }
938
-
939
- for (i = 0; i < count; i++) {
940
- rk_pcie->rsts[i].rst = devm_reset_control_get_exclusive(dev,
941
- rk_pcie->rsts[i].id);
942
- if (IS_ERR_OR_NULL(rk_pcie->rsts[i].rst)) {
943
- dev_err(dev, "failed to get %s\n",
944
- rk_pcie->clks[i].id);
945
- return -PTR_ERR(rk_pcie->rsts[i].rst);
946
- }
947
- }
948
-
949
- for (i = 0; i < count; i++) {
950
- ret = reset_control_deassert(rk_pcie->rsts[i].rst);
951
- if (ret) {
952
- dev_err(dev, "failed to release %s\n",
953
- rk_pcie->rsts[i].id);
954
- return ret;
955
- }
956
- }
9571346
9581347 return 0;
9591348 }
....@@ -1058,13 +1447,37 @@
10581447 table->start.chnl = table->chn;
10591448 }
10601449
1450
+static void rk_pcie_hot_rst_work(struct work_struct *work)
1451
+{
1452
+ struct rk_pcie *rk_pcie = container_of(work, struct rk_pcie, hot_rst_work);
1453
+ u32 val, status;
1454
+ int ret;
1455
+
1456
+ /* Setup command register */
1457
+ val = dw_pcie_readl_dbi(rk_pcie->pci, PCI_COMMAND);
1458
+ val &= 0xffff0000;
1459
+ val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
1460
+ PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
1461
+ dw_pcie_writel_dbi(rk_pcie->pci, PCI_COMMAND, val);
1462
+
1463
+ if (rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL) & PCIE_LTSSM_APP_DLY2_EN) {
1464
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_LTSSM_STATUS,
1465
+ status, ((status & 0x3F) == 0), 100, RK_PCIE_HOTRESET_TMOUT_US);
1466
+ if (ret)
1467
+ dev_err(rk_pcie->pci->dev, "wait for detect quiet failed!\n");
1468
+
1469
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL,
1470
+ (PCIE_LTSSM_APP_DLY2_DONE) | ((PCIE_LTSSM_APP_DLY2_DONE) << 16));
1471
+ }
1472
+}
1473
+
10611474 static irqreturn_t rk_pcie_sys_irq_handler(int irq, void *arg)
10621475 {
10631476 struct rk_pcie *rk_pcie = arg;
10641477 u32 chn;
10651478 union int_status status;
10661479 union int_clear clears;
1067
- u32 reg, val;
1480
+ u32 reg;
10681481
10691482 status.asdword = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
10701483 PCIE_DMA_WR_INT_STATUS);
....@@ -1105,14 +1518,8 @@
11051518 }
11061519
11071520 reg = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC);
1108
- if (reg & BIT(2)) {
1109
- /* Setup command register */
1110
- val = dw_pcie_readl_dbi(rk_pcie->pci, PCI_COMMAND);
1111
- val &= 0xffff0000;
1112
- val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
1113
- PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
1114
- dw_pcie_writel_dbi(rk_pcie->pci, PCI_COMMAND, val);
1115
- }
1521
+ if (reg & BIT(2))
1522
+ queue_work(rk_pcie->hot_rst_wq, &rk_pcie->hot_rst_work);
11161523
11171524 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC, reg);
11181525
....@@ -1149,6 +1556,11 @@
11491556 .mode = RK_PCIE_EP_TYPE,
11501557 };
11511558
1559
+static const struct rk_pcie_of_data rk3528_pcie_rc_of_data = {
1560
+ .mode = RK_PCIE_RC_TYPE,
1561
+ .msi_vector_num = 8,
1562
+};
1563
+
11521564 static const struct of_device_id rk_pcie_of_match[] = {
11531565 {
11541566 .compatible = "rockchip,rk1808-pcie",
....@@ -1159,11 +1571,27 @@
11591571 .data = &rk_pcie_ep_of_data,
11601572 },
11611573 {
1574
+ .compatible = "rockchip,rk3528-pcie",
1575
+ .data = &rk3528_pcie_rc_of_data,
1576
+ },
1577
+ {
1578
+ .compatible = "rockchip,rk3562-pcie",
1579
+ .data = &rk3528_pcie_rc_of_data,
1580
+ },
1581
+ {
11621582 .compatible = "rockchip,rk3568-pcie",
11631583 .data = &rk_pcie_rc_of_data,
11641584 },
11651585 {
11661586 .compatible = "rockchip,rk3568-pcie-ep",
1587
+ .data = &rk_pcie_ep_of_data,
1588
+ },
1589
+ {
1590
+ .compatible = "rockchip,rk3588-pcie",
1591
+ .data = &rk_pcie_rc_of_data,
1592
+ },
1593
+ {
1594
+ .compatible = "rockchip,rk3588-pcie-ep",
11671595 .data = &rk_pcie_ep_of_data,
11681596 },
11691597 {},
....@@ -1215,7 +1643,8 @@
12151643
12161644 /* LTSSM EN ctrl mode */
12171645 val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL);
1218
- val |= PCIE_LTSSM_ENABLE_ENHANCE | (PCIE_LTSSM_ENABLE_ENHANCE << 16);
1646
+ val |= (PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN)
1647
+ | ((PCIE_LTSSM_APP_DLY2_EN | PCIE_LTSSM_ENABLE_ENHANCE) << 16);
12191648 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL, val);
12201649 }
12211650
....@@ -1253,7 +1682,7 @@
12531682 static int rk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
12541683 irq_hw_number_t hwirq)
12551684 {
1256
- irq_set_chip_and_handler(irq, &rk_pcie_legacy_irq_chip, handle_simple_irq);
1685
+ irq_set_chip_and_handler(irq, &rk_pcie_legacy_irq_chip, handle_level_irq);
12571686 irq_set_chip_data(irq, domain->host_data);
12581687
12591688 return 0;
....@@ -1342,41 +1771,6 @@
13421771 return ret;
13431772 }
13441773
1345
-static int rk_pci_find_capability(struct rk_pcie *rk_pcie, int cap)
1346
-{
1347
- u32 header;
1348
- int ttl;
1349
- int start = 0;
1350
- int pos = PCI_CFG_SPACE_SIZE;
1351
-
1352
- /* minimum 8 bytes per capability */
1353
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
1354
-
1355
- header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
1356
-
1357
- /*
1358
- * If we have no capabilities, this is indicated by cap ID,
1359
- * cap version and next pointer all being 0.
1360
- */
1361
- if (header == 0)
1362
- return 0;
1363
-
1364
- while (ttl-- > 0) {
1365
- if (PCI_EXT_CAP_ID(header) == cap && pos != start)
1366
- return pos;
1367
-
1368
- pos = PCI_EXT_CAP_NEXT(header);
1369
- if (pos < PCI_CFG_SPACE_SIZE)
1370
- break;
1371
-
1372
- header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
1373
- if (!header)
1374
- break;
1375
- }
1376
-
1377
- return 0;
1378
-}
1379
-
13801774 #define RAS_DES_EVENT(ss, v) \
13811775 do { \
13821776 dw_pcie_writel_dbi(pcie->pci, cap_base + 8, v); \
....@@ -1387,8 +1781,27 @@
13871781 {
13881782 struct rk_pcie *pcie = s->private;
13891783 int cap_base;
1784
+ u32 val = rk_pcie_readl_apb(pcie, PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN);
1785
+ char *pm;
13901786
1391
- cap_base = rk_pci_find_capability(pcie, PCI_EXT_CAP_ID_VNDR);
1787
+ if (val & BIT(6))
1788
+ pm = "In training";
1789
+ else if (val & BIT(5))
1790
+ pm = "L1.2";
1791
+ else if (val & BIT(4))
1792
+ pm = "L1.1";
1793
+ else if (val & BIT(3))
1794
+ pm = "L1";
1795
+ else if (val & BIT(2))
1796
+ pm = "L0";
1797
+ else if (val & 0x3)
1798
+ pm = (val == 0x3) ? "L0s" : (val & BIT(1) ? "RX L0s" : "TX L0s");
1799
+ else
1800
+ pm = "Invalid";
1801
+
1802
+ seq_printf(s, "Common event signal status: 0x%s\n", pm);
1803
+
1804
+ cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
13921805 if (!cap_base) {
13931806 dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
13941807 return 0;
....@@ -1424,7 +1837,6 @@
14241837
14251838 return 0;
14261839 }
1427
-
14281840 static int rockchip_pcie_rasdes_open(struct inode *inode, struct file *file)
14291841 {
14301842 return single_open(file, rockchip_pcie_rasdes_show,
....@@ -1443,7 +1855,7 @@
14431855 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
14441856 return -EFAULT;
14451857
1446
- cap_base = rk_pci_find_capability(pcie, PCI_EXT_CAP_ID_VNDR);
1858
+ cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
14471859 if (!cap_base) {
14481860 dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
14491861 return 0;
....@@ -1528,29 +1940,35 @@
15281940 const struct rk_pcie_of_data *data;
15291941 enum rk_pcie_device_mode mode;
15301942 struct device_node *np = pdev->dev.of_node;
1531
- struct platform_driver *drv = to_platform_driver(dev->driver);
1532
- u32 val;
1943
+ u32 val = 0;
15331944 int irq;
15341945
15351946 match = of_match_device(rk_pcie_of_match, dev);
1536
- if (!match)
1537
- return -EINVAL;
1947
+ if (!match) {
1948
+ ret = -EINVAL;
1949
+ goto release_driver;
1950
+ }
15381951
15391952 data = (struct rk_pcie_of_data *)match->data;
15401953 mode = (enum rk_pcie_device_mode)data->mode;
15411954
15421955 rk_pcie = devm_kzalloc(dev, sizeof(*rk_pcie), GFP_KERNEL);
1543
- if (!rk_pcie)
1544
- return -ENOMEM;
1956
+ if (!rk_pcie) {
1957
+ ret = -ENOMEM;
1958
+ goto release_driver;
1959
+ }
15451960
15461961 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1547
- if (!pci)
1548
- return -ENOMEM;
1962
+ if (!pci) {
1963
+ ret = -ENOMEM;
1964
+ goto release_driver;
1965
+ }
15491966
15501967 pci->dev = dev;
15511968 pci->ops = &dw_pcie_ops;
15521969
15531970 rk_pcie->mode = mode;
1971
+ rk_pcie->msi_vector_num = data->msi_vector_num;
15541972 rk_pcie->pci = pci;
15551973
15561974 if (of_device_is_compatible(np, "rockchip,rk1808-pcie") ||
....@@ -1565,20 +1983,40 @@
15651983 ret = rk_pcie_resource_get(pdev, rk_pcie);
15661984 if (ret) {
15671985 dev_err(dev, "resource init failed\n");
1568
- return ret;
1986
+ goto release_driver;
15691987 }
15701988
1989
+ if (!IS_ERR_OR_NULL(rk_pcie->prsnt_gpio)) {
1990
+ if (!gpiod_get_value(rk_pcie->prsnt_gpio)) {
1991
+ ret = -ENODEV;
1992
+ goto release_driver;
1993
+ }
1994
+ }
1995
+
1996
+ rk_pcie->supports_clkreq = device_property_read_bool(dev, "supports-clkreq");
1997
+
1998
+retry_regulator:
15711999 /* DON'T MOVE ME: must be enable before phy init */
15722000 rk_pcie->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
15732001 if (IS_ERR(rk_pcie->vpcie3v3)) {
1574
- if (PTR_ERR(rk_pcie->vpcie3v3) != -ENODEV)
1575
- return PTR_ERR(rk_pcie->vpcie3v3);
2002
+ if (PTR_ERR(rk_pcie->vpcie3v3) != -ENODEV) {
2003
+ if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT)) {
2004
+ /* Deferred, but in threaded context: retry for at most 10 s total (500 * 20 ms) */
2005
+ msleep(20);
2006
+ if (++val < 500)
2007
+ goto retry_regulator;
2008
+ }
2009
+
2010
+ ret = PTR_ERR(rk_pcie->vpcie3v3);
2011
+ goto release_driver;
2012
+ }
2013
+
15762014 dev_info(dev, "no vpcie3v3 regulator found\n");
15772015 }
15782016
15792017 ret = rk_pcie_enable_power(rk_pcie);
15802018 if (ret)
1581
- return ret;
2019
+ goto release_driver;
15822020
15832021 ret = rk_pcie_phy_init(rk_pcie);
15842022 if (ret) {
....@@ -1586,16 +2024,19 @@
15862024 goto disable_vpcie3v3;
15872025 }
15882026
1589
- ret = rk_pcie_reset_control_release(rk_pcie);
1590
- if (ret) {
1591
- dev_err(dev, "reset control init failed\n");
1592
- goto disable_vpcie3v3;
2027
+ rk_pcie->rsts = devm_reset_control_array_get_exclusive(dev);
2028
+ if (IS_ERR(rk_pcie->rsts)) {
2029
+ ret = PTR_ERR(rk_pcie->rsts);
2030
+ dev_err(dev, "failed to get reset lines\n");
2031
+ goto disable_phy;
15932032 }
2033
+
2034
+ reset_control_deassert(rk_pcie->rsts);
15942035
15952036 ret = rk_pcie_request_sys_irq(rk_pcie, pdev);
15962037 if (ret) {
15972038 dev_err(dev, "pcie irq init failed\n");
1598
- goto disable_vpcie3v3;
2039
+ goto disable_phy;
15992040 }
16002041
16012042 platform_set_drvdata(pdev, rk_pcie);
....@@ -1603,7 +2044,7 @@
16032044 ret = rk_pcie_clk_init(rk_pcie);
16042045 if (ret) {
16052046 dev_err(dev, "clock init failed\n");
1606
- goto disable_vpcie3v3;
2047
+ goto disable_phy;
16072048 }
16082049
16092050 dw_pcie_dbi_ro_wr_en(pci);
....@@ -1626,9 +2067,9 @@
16262067 /* Unmask all legacy interrupt from INTA~INTD */
16272068 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
16282069 UNMASK_ALL_LEGACY_INT);
2070
+ } else {
2071
+ dev_info(dev, "missing legacy IRQ resource\n");
16292072 }
1630
-
1631
- dev_info(dev, "missing legacy IRQ resource\n");
16322073 }
16332074
16342075 /* Set PCIe mode */
....@@ -1642,13 +2083,39 @@
16422083 rk_pcie->is_signal_test = true;
16432084 }
16442085
1645
- /* Force into compliance mode */
1646
- if (device_property_read_bool(dev, "rockchip,compliance-mode")) {
1647
- val = dw_pcie_readl_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS);
1648
- val |= BIT(4);
1649
- dw_pcie_writel_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS, val);
2086
+ /*
2087
+ * Force into compliance mode
2088
+ * comp_prst is a two-element array of which the first element
2089
+ * stands for speed mode, and the second one is preset value encoding:
2090
+ * [0] 0->SMA tool control the signal switch, 1/2/3 is for manual Gen setting
2091
+ * [1] transmitter setting for manual Gen setting, valid only if [0] isn't zero.
2092
+ */
2093
+ if (!device_property_read_u32_array(dev, "rockchip,compliance-mode",
2094
+ rk_pcie->comp_prst, 2)) {
2095
+ BUG_ON(rk_pcie->comp_prst[0] > 3 || rk_pcie->comp_prst[1] > 10);
2096
+ if (!rk_pcie->comp_prst[0]) {
2097
+ dev_info(dev, "Auto compliance mode for SMA tool.\n");
2098
+ } else {
2099
+ dev_info(dev, "compliance mode for soldered board Gen%d, P%d.\n",
2100
+ rk_pcie->comp_prst[0], rk_pcie->comp_prst[1]);
2101
+ val = dw_pcie_readl_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS);
2102
+ val |= BIT(4) | rk_pcie->comp_prst[0] | (rk_pcie->comp_prst[1] << 12);
2103
+ dw_pcie_writel_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS, val);
2104
+ }
16502105 rk_pcie->is_signal_test = true;
16512106 }
2107
+
2108
+ /* Skip waiting for training to pass in system PM routine */
2109
+ if (device_property_read_bool(dev, "rockchip,skip-scan-in-resume"))
2110
+ rk_pcie->skip_scan_in_resume = true;
2111
+
2112
+ rk_pcie->hot_rst_wq = create_singlethread_workqueue("rk_pcie_hot_rst_wq");
2113
+ if (!rk_pcie->hot_rst_wq) {
2114
+ dev_err(dev, "failed to create hot_rst workqueue\n");
2115
+ ret = -ENOMEM;
2116
+ goto remove_irq_domain;
2117
+ }
2118
+ INIT_WORK(&rk_pcie->hot_rst_work, rk_pcie_hot_rst_work);
16522119
16532120 switch (rk_pcie->mode) {
16542121 case RK_PCIE_RC_TYPE:
....@@ -1663,12 +2130,12 @@
16632130 return 0;
16642131
16652132 if (ret)
1666
- goto remove_irq_domain;
2133
+ goto remove_rst_wq;
16672134
16682135 ret = rk_pcie_init_dma_trx(rk_pcie);
16692136 if (ret) {
16702137 dev_err(dev, "failed to add dma extension\n");
1671
- return ret;
2138
+ goto remove_rst_wq;
16722139 }
16732140
16742141 if (rk_pcie->dma_obj) {
....@@ -1680,13 +2147,15 @@
16802147 /* hold link reset grant after link-up */
16812148 ret = rk_pcie_reset_grant_ctrl(rk_pcie, false);
16822149 if (ret)
1683
- goto remove_irq_domain;
2150
+ goto remove_rst_wq;
16842151 }
16852152
16862153 dw_pcie_dbi_ro_wr_dis(pci);
16872154
16882155 device_init_wakeup(dev, true);
1689
- drv->driver.pm = &rockchip_dw_pcie_pm_ops;
2156
+
2157
+ /* Enable async system PM for multiports SoC */
2158
+ device_enable_async_suspend(dev);
16902159
16912160 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
16922161 ret = rockchip_pcie_debugfs_init(rk_pcie);
....@@ -1694,7 +2163,7 @@
16942163 dev_err(dev, "failed to setup debugfs: %d\n", ret);
16952164
16962165 /* Enable RASDES Error event by default */
1697
- val = rk_pci_find_capability(rk_pcie, PCI_EXT_CAP_ID_VNDR);
2166
+ val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_VNDR);
16982167 if (!val) {
16992168 dev_err(dev, "Not able to find RASDES CAP!\n");
17002169 return 0;
....@@ -1706,35 +2175,183 @@
17062175
17072176 return 0;
17082177
2178
+remove_rst_wq:
2179
+ destroy_workqueue(rk_pcie->hot_rst_wq);
17092180 remove_irq_domain:
17102181 if (rk_pcie->irq_domain)
17112182 irq_domain_remove(rk_pcie->irq_domain);
2183
+disable_phy:
2184
+ phy_power_off(rk_pcie->phy);
2185
+ phy_exit(rk_pcie->phy);
17122186 deinit_clk:
1713
- rk_pcie_clk_deinit(rk_pcie);
2187
+ clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
17142188 disable_vpcie3v3:
17152189 rk_pcie_disable_power(rk_pcie);
2190
+release_driver:
2191
+ if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT))
2192
+ device_release_driver(dev);
17162193
17172194 return ret;
17182195 }
17192196
17202197 static int rk_pcie_probe(struct platform_device *pdev)
17212198 {
1722
- struct task_struct *tsk;
2199
+ if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT)) {
2200
+ struct task_struct *tsk;
17232201
1724
- tsk = kthread_run(rk_pcie_really_probe, pdev, "rk-pcie");
1725
- if (IS_ERR(tsk)) {
1726
- dev_err(&pdev->dev, "start rk-pcie thread failed\n");
1727
- return PTR_ERR(tsk);
2202
+ tsk = kthread_run(rk_pcie_really_probe, pdev, "rk-pcie");
2203
+ if (IS_ERR(tsk)) {
2204
+ dev_err(&pdev->dev, "start rk-pcie thread failed\n");
2205
+ return PTR_ERR(tsk);
2206
+ }
2207
+
2208
+ return 0;
17282209 }
1729
- return 0;
2210
+
2211
+ return rk_pcie_really_probe(pdev);
17302212 }
2213
+
2214
+#ifdef CONFIG_PCIEASPM
2215
+static void rk_pcie_downstream_dev_to_d0(struct rk_pcie *rk_pcie, bool enable)
2216
+{
2217
+ struct pcie_port *pp = &rk_pcie->pci->pp;
2218
+ struct pci_bus *child, *root_bus = NULL;
2219
+ struct pci_dev *pdev, *bridge;
2220
+ u32 val;
2221
+
2222
+ list_for_each_entry(child, &pp->bridge->bus->children, node) {
2223
+ /* Bring downstream devices to D3 if they are not already in */
2224
+ if (child->parent == pp->bridge->bus) {
2225
+ root_bus = child;
2226
+ bridge = root_bus->self;
2227
+ break;
2228
+ }
2229
+ }
2230
+
2231
+ if (!root_bus) {
2232
+ dev_err(rk_pcie->pci->dev, "Failed to find downstream devices\n");
2233
+ return;
2234
+ }
2235
+
2236
+ /* Save and restore root bus ASPM */
2237
+ if (enable) {
2238
+ if (rk_pcie->l1ss_ctl1)
2239
+ dw_pcie_writel_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1, rk_pcie->l1ss_ctl1);
2240
+
2241
+ /* rk_pcie->aspm is saved in advance when this runs with enable == false */
2242
+ dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, rk_pcie->aspm);
2243
+ } else {
2244
+ val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1);
2245
+ if (val & PCI_L1SS_CTL1_L1SS_MASK)
2246
+ rk_pcie->l1ss_ctl1 = val;
2247
+ else
2248
+ rk_pcie->l1ss_ctl1 = 0;
2249
+
2250
+ val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL);
2251
+ rk_pcie->aspm = val & PCI_EXP_LNKCTL_ASPMC;
2252
+ val &= ~(PCI_EXP_LNKCAP_ASPM_L1 | PCI_EXP_LNKCAP_ASPM_L0S);
2253
+ dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, val);
2254
+ }
2255
+
2256
+ list_for_each_entry(pdev, &root_bus->devices, bus_list) {
2257
+ if (PCI_SLOT(pdev->devfn) == 0) {
2258
+ if (pci_set_power_state(pdev, PCI_D0))
2259
+ dev_err(rk_pcie->pci->dev,
2260
+ "Failed to transition %s to D0 state\n",
2261
+ dev_name(&pdev->dev));
2262
+ if (enable) {
2263
+ if (rk_pcie->l1ss_ctl1) {
2264
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, &val);
2265
+ val &= ~PCI_L1SS_CTL1_L1SS_MASK;
2266
+ val |= (rk_pcie->l1ss_ctl1 & PCI_L1SS_CTL1_L1SS_MASK);
2267
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, val);
2268
+ }
2269
+
2270
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
2271
+ PCI_EXP_LNKCTL_ASPMC, rk_pcie->aspm);
2272
+ } else {
2273
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
2274
+ }
2275
+ }
2276
+ }
2277
+}
2278
+#endif
17312279
17322280 static int __maybe_unused rockchip_dw_pcie_suspend(struct device *dev)
17332281 {
17342282 struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
1735
- int ret;
2283
+ int ret = 0, power;
2284
+ struct dw_pcie *pci = rk_pcie->pci;
2285
+ u32 status;
2286
+
2287
+ /*
2288
+ * This is as per PCI Express Base r5.0 r1.0 May 22-2019,
2289
+ * 5.2 Link State Power Management (Page #440).
2290
+ *
2291
+ * L2/L3 Ready entry negotiations happen while in the L0 state.
2292
+ * L2/L3 Ready are entered only after the negotiation completes.
2293
+ *
2294
+ * The following example sequence illustrates the multi-step Link state
2295
+ * transition process leading up to entering a system sleep state:
2296
+ * 1. System software directs all Functions of a Downstream component to D3Hot.
2297
+ * 2. The Downstream component then initiates the transition of the Link to L1
2298
+ * as required.
2299
+ * 3. System software then causes the Root Complex to broadcast the PME_Turn_Off
2300
+ * Message in preparation for removing the main power source.
2301
+ * 4. This Message causes the subject Link to transition back to L0 in order to
2302
+ * send it and to enable the Downstream component to respond with PME_TO_Ack.
2303
+ * 5. After sending the PME_TO_Ack, the Downstream component initiates the L2/L3
2304
+ * Ready transition protocol.
2305
+ */
2306
+
2307
+ /* 1. All sub-devices are in D3hot by PCIe stack */
2308
+ dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
17362309
17372310 rk_pcie_link_status_clear(rk_pcie);
2311
+
2312
+ /*
2313
+ * WLAN devices will already have been shut down by their function driver, so doing L2 here
2314
+ * must fail. Skip L2 routine.
2315
+ */
2316
+ if (rk_pcie->skip_scan_in_resume) {
2317
+ rfkill_get_wifi_power_state(&power);
2318
+ if (!power)
2319
+ goto no_l2;
2320
+ }
2321
+
2322
+ /* 2. Broadcast PME_Turn_Off Message */
2323
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_MSG_GEN, PME_TURN_OFF);
2324
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_MSG_GEN,
2325
+ status, !(status & BIT(4)), 20, RK_PCIE_L2_TMOUT_US);
2326
+ if (ret) {
2327
+ dev_err(dev, "Failed to send PME_Turn_Off\n");
2328
+ goto no_l2;
2329
+ }
2330
+
2331
+ /* 3. Wait for PME_TO_Ack */
2332
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_INTR_STATUS_MSG_RX,
2333
+ status, status & BIT(9), 20, RK_PCIE_L2_TMOUT_US);
2334
+ if (ret) {
2335
+ dev_err(dev, "Failed to receive PME_TO_Ack\n");
2336
+ goto no_l2;
2337
+ }
2338
+
2339
+ /* 4. Clear PME_TO_Ack and Wait for ready to enter L23 message */
2340
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MSG_RX, PME_TO_ACK);
2341
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_POWER,
2342
+ status, status & READY_ENTER_L23, 20, RK_PCIE_L2_TMOUT_US);
2343
+ if (ret) {
2344
+ dev_err(dev, "Failed to ready to enter L23\n");
2345
+ goto no_l2;
2346
+ }
2347
+
2348
+ /* 5. Check we are in L2 */
2349
+ ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_LTSSM_STATUS,
2350
+ status, ((status & S_MAX) == S_L2_IDLE), 20, RK_PCIE_L2_TMOUT_US);
2351
+ if (ret)
2352
+ dev_err(pci->dev, "Link isn't in L2 idle!\n");
2353
+
2354
+no_l2:
17382355 rk_pcie_disable_ltssm(rk_pcie);
17392356
17402357 /* make sure assert phy success */
....@@ -1743,15 +2360,14 @@
17432360 phy_power_off(rk_pcie->phy);
17442361 phy_exit(rk_pcie->phy);
17452362
1746
- clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks);
2363
+ rk_pcie->intx = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY);
2364
+
2365
+ clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
17472366
17482367 rk_pcie->in_suspend = true;
17492368
17502369 gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);
17512370 ret = rk_pcie_disable_power(rk_pcie);
1752
-
1753
- if (rk_pcie->pci->pp.msi_irq > 0)
1754
- dw_pcie_free_msi(&rk_pcie->pci->pp);
17552371
17562372 return ret;
17572373 }
....@@ -1762,20 +2378,25 @@
17622378 bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;
17632379 int ret;
17642380
2381
+ reset_control_assert(rk_pcie->rsts);
2382
+ udelay(10);
2383
+ reset_control_deassert(rk_pcie->rsts);
2384
+
17652385 ret = rk_pcie_enable_power(rk_pcie);
17662386 if (ret)
17672387 return ret;
17682388
1769
- ret = clk_bulk_enable(rk_pcie->clk_cnt, rk_pcie->clks);
2389
+ ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
17702390 if (ret) {
1771
- clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
2391
+ dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret);
17722392 return ret;
17732393 }
17742394
1775
- ret = phy_set_mode(rk_pcie->phy, rk_pcie->phy_mode);
2395
+ ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
2396
+ rk_pcie->phy_sub_mode);
17762397 if (ret) {
17772398 dev_err(dev, "fail to set phy to mode %s, err %d\n",
1778
- (rk_pcie->phy_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
2399
+ (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
17792400 ret);
17802401 return ret;
17812402 }
....@@ -1805,14 +2426,14 @@
18052426 if (std_rc)
18062427 dw_pcie_setup_rc(&rk_pcie->pci->pp);
18072428
2429
+ rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
2430
+ rk_pcie->intx | 0xffff0000);
2431
+
18082432 ret = rk_pcie_establish_link(rk_pcie->pci);
18092433 if (ret) {
18102434 dev_err(dev, "failed to establish pcie link\n");
18112435 goto err;
18122436 }
1813
-
1814
- if (rk_pcie->pci->pp.msi_irq > 0)
1815
- dw_pcie_msi_init(&rk_pcie->pci->pp);
18162437
18172438 if (std_rc)
18182439 goto std_rc_done;
....@@ -1836,6 +2457,9 @@
18362457 goto err;
18372458 }
18382459
2460
+ if (rk_pcie->pci->pp.msi_irq > 0)
2461
+ dw_pcie_msi_init(&rk_pcie->pci->pp);
2462
+
18392463 return 0;
18402464 err:
18412465 rk_pcie_disable_power(rk_pcie);
....@@ -1843,7 +2467,33 @@
18432467 return ret;
18442468 }
18452469
2470
+#ifdef CONFIG_PCIEASPM
2471
+static int rockchip_dw_pcie_prepare(struct device *dev)
2472
+{
2473
+ struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
2474
+
2475
+ dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
2476
+ rk_pcie_downstream_dev_to_d0(rk_pcie, false);
2477
+ dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
2478
+
2479
+ return 0;
2480
+}
2481
+
2482
+static void rockchip_dw_pcie_complete(struct device *dev)
2483
+{
2484
+ struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
2485
+
2486
+ dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
2487
+ rk_pcie_downstream_dev_to_d0(rk_pcie, true);
2488
+ dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
2489
+}
2490
+#endif
2491
+
18462492 static const struct dev_pm_ops rockchip_dw_pcie_pm_ops = {
2493
+#ifdef CONFIG_PCIEASPM
2494
+ .prepare = rockchip_dw_pcie_prepare,
2495
+ .complete = rockchip_dw_pcie_complete,
2496
+#endif
18472497 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_dw_pcie_suspend,
18482498 rockchip_dw_pcie_resume)
18492499 };
....@@ -1853,10 +2503,12 @@
18532503 .name = "rk-pcie",
18542504 .of_match_table = rk_pcie_of_match,
18552505 .suppress_bind_attrs = true,
2506
+ .pm = &rockchip_dw_pcie_pm_ops,
18562507 },
2508
+ .probe = rk_pcie_probe,
18572509 };
18582510
1859
-module_platform_driver_probe(rk_plat_pcie_driver, rk_pcie_probe);
2511
+module_platform_driver(rk_plat_pcie_driver);
18602512
18612513 MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com>");
18622514 MODULE_DESCRIPTION("RockChip PCIe Controller driver");