2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/pci/controller/pci-tegra.c
@@ -17,6 +17,7 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/export.h>
+#include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
 #include <linux/irq.h>
@@ -30,6 +31,7 @@
 #include <linux/of_platform.h>
 #include <linux/pci.h>
 #include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/sizes.h>
@@ -95,7 +97,8 @@
 #define AFI_MSI_EN_VEC7 0xa8
 
 #define AFI_CONFIGURATION 0xac
-#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+#define AFI_CONFIGURATION_CLKEN_OVERRIDE (1 << 31)
 
 #define AFI_FPCI_ERROR_MASKS 0xb0
 
@@ -159,13 +162,14 @@
 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20)
 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20)
+#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x) (1 << ((x) + 29))
+#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL (0x7 << 29)
 
 #define AFI_FUSE 0x104
 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
 
 #define AFI_PEX0_CTRL 0x110
 #define AFI_PEX1_CTRL 0x118
-#define AFI_PEX2_CTRL 0x128
 #define AFI_PEX_CTRL_RST (1 << 0)
 #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
 #define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
@@ -177,19 +181,66 @@
 
 #define AFI_PEXBIAS_CTRL_0 0x168
 
+#define RP_ECTL_2_R1 0x00000e84
+#define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff
+
+#define RP_ECTL_4_R1 0x00000e8c
+#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK (0xffff << 16)
+#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT 16
+
+#define RP_ECTL_5_R1 0x00000e90
+#define RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK 0xffffffff
+
+#define RP_ECTL_6_R1 0x00000e94
+#define RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK 0xffffffff
+
+#define RP_ECTL_2_R2 0x00000ea4
+#define RP_ECTL_2_R2_RX_CTLE_1C_MASK 0xffff
+
+#define RP_ECTL_4_R2 0x00000eac
+#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK (0xffff << 16)
+#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT 16
+
+#define RP_ECTL_5_R2 0x00000eb0
+#define RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK 0xffffffff
+
+#define RP_ECTL_6_R2 0x00000eb4
+#define RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK 0xffffffff
+
 #define RP_VEND_XP 0x00000f00
-#define RP_VEND_XP_DL_UP (1 << 30)
+#define RP_VEND_XP_DL_UP (1 << 30)
+#define RP_VEND_XP_OPPORTUNISTIC_ACK (1 << 27)
+#define RP_VEND_XP_OPPORTUNISTIC_UPDATEFC (1 << 28)
+#define RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK (0xff << 18)
+
+#define RP_VEND_CTL0 0x00000f44
+#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK (0xf << 12)
+#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH (0x9 << 12)
+
+#define RP_VEND_CTL1 0x00000f48
+#define RP_VEND_CTL1_ERPT (1 << 13)
+
+#define RP_VEND_XP_BIST 0x00000f4c
+#define RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE (1 << 28)
 
 #define RP_VEND_CTL2 0x00000fa8
 #define RP_VEND_CTL2_PCA_ENABLE (1 << 7)
 
 #define RP_PRIV_MISC 0x00000fe0
-#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
-#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK (0x7f << 16)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD (0xf << 16)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE (1 << 23)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK (0x7f << 24)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD (0xf << 24)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE (1 << 31)
 
 #define RP_LINK_CONTROL_STATUS 0x00000090
 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
+
+#define RP_LINK_CONTROL_STATUS_2 0x000000b0
 
 #define PADS_CTL_SEL 0x0000009c
 
@@ -226,14 +277,15 @@
 #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
 
 #define PME_ACK_TIMEOUT 10000
+#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */
 
 struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
-	unsigned long pages;
	struct mutex lock;
-	u64 phys;
+	void *virt;
+	dma_addr_t phys;
	int irq;
 };
 
@@ -249,10 +301,12 @@
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
+	unsigned long afi_pex2_ctrl;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
+	u32 update_fc_threshold;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
@@ -260,6 +314,23 @@
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
+	bool update_clamp_threshold;
+	bool program_deskew_time;
+	bool update_fc_timer;
+	bool has_cache_bars;
+	struct {
+		struct {
+			u32 rp_ectl_2_r1;
+			u32 rp_ectl_4_r1;
+			u32 rp_ectl_5_r1;
+			u32 rp_ectl_6_r1;
+			u32 rp_ectl_2_r2;
+			u32 rp_ectl_4_r2;
+			u32 rp_ectl_5_r2;
+			u32 rp_ectl_6_r2;
+		} regs;
+		bool enable;
+	} ectl;
 };
 
 static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
@@ -276,16 +347,6 @@
	int irq;
 
	struct resource cs;
-	struct resource io;
-	struct resource pio;
-	struct resource mem;
-	struct resource prefetch;
-	struct resource busn;
-
-	struct {
-		resource_size_t mem;
-		resource_size_t io;
-	} offset;
 
	struct clk *pex_clk;
	struct clk *afi_clk;
@@ -321,6 +382,8 @@
	unsigned int lanes;
 
	struct phy **phys;
+
+	struct gpio_desc *reset_gpio;
 };
 
 struct tegra_pcie_bus {
@@ -440,6 +503,7 @@
 
 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
 {
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long ret = 0;
 
	switch (port->index) {
@@ -452,7 +516,7 @@
		break;
 
	case 2:
-		ret = AFI_PEX2_CTRL;
+		ret = soc->afi_pex2_ctrl;
		break;
	}
 
@@ -465,15 +529,145 @@
	unsigned long value;
 
	/* pulse reset signal */
-	value = afi_readl(port->pcie, ctrl);
-	value &= ~AFI_PEX_CTRL_RST;
-	afi_writel(port->pcie, value, ctrl);
+	if (port->reset_gpio) {
+		gpiod_set_value(port->reset_gpio, 1);
+	} else {
+		value = afi_readl(port->pcie, ctrl);
+		value &= ~AFI_PEX_CTRL_RST;
+		afi_writel(port->pcie, value, ctrl);
+	}
 
	usleep_range(1000, 2000);
 
-	value = afi_readl(port->pcie, ctrl);
-	value |= AFI_PEX_CTRL_RST;
-	afi_writel(port->pcie, value, ctrl);
+	if (port->reset_gpio) {
+		gpiod_set_value(port->reset_gpio, 0);
+	} else {
+		value = afi_readl(port->pcie, ctrl);
+		value |= AFI_PEX_CTRL_RST;
+		afi_writel(port->pcie, value, ctrl);
+	}
+}
+
+static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
+{
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
+	u32 value;
+
+	/* Enable AER capability */
+	value = readl(port->base + RP_VEND_CTL1);
+	value |= RP_VEND_CTL1_ERPT;
+	writel(value, port->base + RP_VEND_CTL1);
+
+	/* Optimal settings to enhance bandwidth */
+	value = readl(port->base + RP_VEND_XP);
+	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
+	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
+	writel(value, port->base + RP_VEND_XP);
+
+	/*
+	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
+	 * to avoid truncation of PM messages which results in receiver errors
+	 */
+	value = readl(port->base + RP_VEND_XP_BIST);
+	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
+	writel(value, port->base + RP_VEND_XP_BIST);
+
+	value = readl(port->base + RP_PRIV_MISC);
+	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
+	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;
+
+	if (soc->update_clamp_threshold) {
+		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
+				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
+		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
+				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
+	}
+
+	writel(value, port->base + RP_PRIV_MISC);
+}
+
+static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
+{
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
+	u32 value;
+
+	value = readl(port->base + RP_ECTL_2_R1);
+	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_2_r1;
+	writel(value, port->base + RP_ECTL_2_R1);
+
+	value = readl(port->base + RP_ECTL_4_R1);
+	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_4_r1 <<
+			RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
+	writel(value, port->base + RP_ECTL_4_R1);
+
+	value = readl(port->base + RP_ECTL_5_R1);
+	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_5_r1;
+	writel(value, port->base + RP_ECTL_5_R1);
+
+	value = readl(port->base + RP_ECTL_6_R1);
+	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_6_r1;
+	writel(value, port->base + RP_ECTL_6_R1);
+
+	value = readl(port->base + RP_ECTL_2_R2);
+	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_2_r2;
+	writel(value, port->base + RP_ECTL_2_R2);
+
+	value = readl(port->base + RP_ECTL_4_R2);
+	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_4_r2 <<
+			RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
+	writel(value, port->base + RP_ECTL_4_R2);
+
+	value = readl(port->base + RP_ECTL_5_R2);
+	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_5_r2;
+	writel(value, port->base + RP_ECTL_5_R2);
+
+	value = readl(port->base + RP_ECTL_6_R2);
+	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_6_r2;
+	writel(value, port->base + RP_ECTL_6_R2);
+}
+
+static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
+{
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
+	u32 value;
+
+	/*
+	 * Sometimes link speed change from Gen2 to Gen1 fails due to
+	 * instability in deskew logic on lane-0. Increase the deskew
+	 * retry time to resolve this issue.
+	 */
+	if (soc->program_deskew_time) {
+		value = readl(port->base + RP_VEND_CTL0);
+		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
+		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
+		writel(value, port->base + RP_VEND_CTL0);
+	}
+
+	if (soc->update_fc_timer) {
+		value = readl(port->base + RP_VEND_XP);
+		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
+		value |= soc->update_fc_threshold;
+		writel(value, port->base + RP_VEND_XP);
+	}
+
+	/*
+	 * PCIe link doesn't come up with few legacy PCIe endpoints if
+	 * root port advertises both Gen-1 and Gen-2 speeds in Tegra.
+	 * Hence, the strategy followed here is to initially advertise
+	 * only Gen-1 and after link is up, retrain link to Gen-2 speed
+	 */
+	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
+	value &= ~PCI_EXP_LNKSTA_CLS;
+	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
+	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
 }
 
 static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
@@ -500,6 +694,13 @@
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}
+
+	tegra_pcie_enable_rp_features(port);
+
+	if (soc->ectl.enable)
+		tegra_pcie_program_ectl_settings(port);
+
+	tegra_pcie_apply_sw_fixup(port);
 }
 
 static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
@@ -521,6 +722,12 @@
 
	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
+
+	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
+	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
+	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
+	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
 }
 
 static void tegra_pcie_port_free(struct tegra_pcie_port *port)
@@ -554,38 +761,6 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
-
-static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
-{
-	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
-	struct list_head *windows = &host->windows;
-	struct device *dev = pcie->dev;
-	int err;
-
-	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
-	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
-	pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
-	pci_add_resource(windows, &pcie->busn);
-
-	err = devm_request_pci_bus_resources(dev, windows);
-	if (err < 0) {
-		pci_free_resource_list(windows);
-		return err;
-	}
-
-	pci_remap_iospace(&pcie->pio, pcie->io.start);
-
-	return 0;
-}
-
-static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
-{
-	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
-	struct list_head *windows = &host->windows;
-
-	pci_unmap_iospace(&pcie->pio);
-	pci_free_resource_list(windows);
-}
 
 static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
 {
@@ -638,7 +813,7 @@
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
-	if (code == AFI_INTR_MASTER_ABORT)
+	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
@@ -667,36 +842,49 @@
  */
 static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 {
-	u32 fpci_bar, size, axi_address;
+	u32 size;
+	struct resource_entry *entry;
+	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
 
	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
 
-	/* Bar 1: downstream IO bar */
-	fpci_bar = 0xfdfc0000;
-	size = resource_size(&pcie->io);
-	axi_address = pcie->io.start;
-	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
-	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
-	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
+	resource_list_for_each_entry(entry, &bridge->windows) {
+		u32 fpci_bar, axi_address;
+		struct resource *res = entry->res;
 
-	/* Bar 2: prefetchable memory BAR */
-	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
-	size = resource_size(&pcie->prefetch);
-	axi_address = pcie->prefetch.start;
-	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
-	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
-	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
+		size = resource_size(res);
 
-	/* Bar 3: non prefetchable memory BAR */
-	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
-	size = resource_size(&pcie->mem);
-	axi_address = pcie->mem.start;
-	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
-	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
-	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
+		switch (resource_type(res)) {
+		case IORESOURCE_IO:
+			/* Bar 1: downstream IO bar */
+			fpci_bar = 0xfdfc0000;
+			axi_address = pci_pio_to_address(res->start);
+			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
+			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
+			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
+			break;
+		case IORESOURCE_MEM:
+			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
+			axi_address = res->start;
+
+			if (res->flags & IORESOURCE_PREFETCH) {
+				/* Bar 2: prefetchable memory BAR */
+				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
+				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
+				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
+
+			} else {
+				/* Bar 3: non prefetchable memory BAR */
+				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
+				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
+				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
+			}
+			break;
+		}
+	}
 
	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
@@ -707,11 +895,13 @@
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);
 
-	/* map all upstream transactions as uncached */
-	afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
-	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
-	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
-	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
+	if (pcie->soc->has_cache_bars) {
+		/* map all upstream transactions as uncached */
+		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
+		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
+		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
+		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
+	}
 
	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
@@ -855,7 +1045,6 @@
 static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
 {
	struct device *dev = pcie->dev;
-	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	int err;
 
@@ -880,12 +1069,6 @@
			return err;
		}
	}
-
-	/* Configure the reference clock driver */
-	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
-
-	if (soc->num_ports > 2)
-		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
 
	return 0;
 }
@@ -921,13 +1104,11 @@
	return 0;
 }
 
-static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
 {
-	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;
-	int err;
 
	/* enable PLL power down */
	if (pcie->phy) {
@@ -945,9 +1126,12 @@
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
+	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;
 
-	list_for_each_entry(port, &pcie->ports, list)
+	list_for_each_entry(port, &pcie->ports, list) {
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
+	}
 
	afi_writel(pcie, value, AFI_PCIE_CONFIG);
 
@@ -961,20 +1145,10 @@
		afi_writel(pcie, value, AFI_FUSE);
	}
 
-	if (soc->program_uphy) {
-		err = tegra_pcie_phy_power_on(pcie);
-		if (err < 0) {
-			dev_err(dev, "failed to power on PHY(s): %d\n", err);
-			return err;
-		}
-	}
-
-	/* take the PCIe interface module out of reset */
-	reset_control_deassert(pcie->pcie_xrst);
-
-	/* finally enable PCIe */
+	/* Disable AFI dynamic clock gating and enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
+	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
	afi_writel(pcie, value, AFI_CONFIGURATION);
 
	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
@@ -992,22 +1166,6 @@
 
	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
-
-	return 0;
-}
-
-static void tegra_pcie_disable_controller(struct tegra_pcie *pcie)
-{
-	int err;
-
-	reset_control_assert(pcie->pcie_xrst);
-
-	if (pcie->soc->program_uphy) {
-		err = tegra_pcie_phy_power_off(pcie);
-		if (err < 0)
-			dev_err(pcie->dev, "failed to power off PHY(s): %d\n",
-				err);
-	}
 }
 
 static void tegra_pcie_power_off(struct tegra_pcie *pcie)
@@ -1017,13 +1175,11 @@
	int err;
 
	reset_control_assert(pcie->afi_rst);
-	reset_control_assert(pcie->pex_rst);
 
	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);
-	clk_disable_unprepare(pcie->pex_clk);
 
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
@@ -1051,46 +1207,66 @@
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);
 
-	if (dev->pm_domain) {
-		err = clk_prepare_enable(pcie->pex_clk);
+	if (!dev->pm_domain) {
+		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
		if (err) {
-			dev_err(dev, "failed to enable PEX clock: %d\n", err);
-			return err;
+			dev_err(dev, "failed to power ungate: %d\n", err);
+			goto regulator_disable;
		}
-		reset_control_deassert(pcie->pex_rst);
-	} else {
-		err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
-							pcie->pex_clk,
-							pcie->pex_rst);
+		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
		if (err) {
-			dev_err(dev, "powerup sequence failed: %d\n", err);
-			return err;
+			dev_err(dev, "failed to remove clamp: %d\n", err);
+			goto powergate;
		}
	}
-
-	reset_control_deassert(pcie->afi_rst);
 
	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
-		return err;
+		goto powergate;
	}
 
	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
-			return err;
+			goto disable_afi_clk;
		}
	}
 
	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
-		return err;
+		goto disable_cml_clk;
	}
 
+	reset_control_deassert(pcie->afi_rst);
+
	return 0;
+
+disable_cml_clk:
+	if (soc->has_cml_clk)
+		clk_disable_unprepare(pcie->cml_clk);
+disable_afi_clk:
+	clk_disable_unprepare(pcie->afi_clk);
+powergate:
+	if (!dev->pm_domain)
+		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+regulator_disable:
+	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
+
+	return err;
+}
+
+static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc *soc = pcie->soc;
+
+	/* Configure the reference clock driver */
+	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
+
+	if (soc->num_ports > 2)
+		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
 }
 
 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
@@ -1176,7 +1352,7 @@
	phy = devm_of_phy_get(dev, np, name);
	kfree(name);
 
-	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
+	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;
 
	return phy;
@@ -1261,7 +1437,7 @@
 {
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
-	struct resource *pads, *afi, *res;
+	struct resource *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
 
@@ -1285,15 +1461,13 @@
		}
	}
 
-	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
-	pcie->pads = devm_ioremap_resource(dev, pads);
+	pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}
 
-	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
-	pcie->afi = devm_ioremap_resource(dev, afi);
+	pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
@@ -1319,10 +1493,8 @@
 
	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
-	if (err < 0) {
-		dev_err(dev, "failed to get IRQ: %d\n", err);
+	if (err < 0)
		goto phys_put;
-	}
 
	pcie->irq = err;
 
@@ -1537,10 +1709,8 @@
	}
 
	err = platform_get_irq_byname(pdev, "msi");
-	if (err < 0) {
-		dev_err(dev, "failed to get IRQ: %d\n", err);
-		goto err;
-	}
+	if (err < 0)
+		goto free_irq_domain;
 
	msi->irq = err;
 
@@ -1548,17 +1718,35 @@
			  tegra_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
-		goto err;
+		goto free_irq_domain;
	}
 
-	/* setup AFI/FPCI range */
-	msi->pages = __get_free_pages(GFP_KERNEL, 0);
-	msi->phys = virt_to_phys((void *)msi->pages);
+	/* Though the PCIe controller can address >32-bit address space, to
+	 * facilitate endpoints that support only 32-bit MSI target address,
+	 * the mask is set to 32-bit to make sure that MSI target address is
+	 * always a 32-bit address
+	 */
+	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+	if (err < 0) {
+		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
+		goto free_irq;
+	}
+
+	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
+				    DMA_ATTR_NO_KERNEL_MAPPING);
+	if (!msi->virt) {
+		dev_err(dev, "failed to allocate DMA memory for MSI\n");
+		err = -ENOMEM;
+		goto free_irq;
+	}
+
	host->msi = &msi->chip;
 
	return 0;
 
-err:
+free_irq:
+	free_irq(msi->irq, pcie);
+free_irq_domain:
	irq_domain_remove(msi->domain);
	return err;
 }
@@ -1595,7 +1783,8 @@
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
 
-	free_pages(msi->pages, 0);
+	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
+		       DMA_ATTR_NO_KERNEL_MAPPING);
 
	if (msi->irq > 0)
		free_irq(msi->irq, pcie);
@@ -1629,6 +1818,15 @@
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
 
	return 0;
+}
+
+static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
+{
+	u32 value;
+
+	value = afi_readl(pcie, AFI_INTR_MASK);
+	value &= ~AFI_INTR_MASK_INT_MASK;
+	afi_writel(pcie, value, AFI_INTR_MASK);
 }
 
 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
@@ -1796,7 +1994,7 @@
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "vddio-pexctl-aud";
	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
-		pcie->num_supplies = 6;
+		pcie->num_supplies = 3;
 
		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
@@ -1804,14 +2002,11 @@
		if (!pcie->supplies)
			return -ENOMEM;
 
-		pcie->supplies[i++].supply = "avdd-pll-uerefe";
		pcie->supplies[i++].supply = "hvddio-pex";
		pcie->supplies[i++].supply = "dvddio-pex";
-		pcie->supplies[i++].supply = "dvdd-pex-pll";
-		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
-		pcie->num_supplies = 7;
+		pcie->num_supplies = 4;
 
		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
@@ -1821,11 +2016,8 @@
 
		pcie->supplies[i++].supply = "avddio-pex";
		pcie->supplies[i++].supply = "dvddio-pex";
-		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
-		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
-		pcie->supplies[i++].supply = "avdd-pll-erefe";
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		bool need_pexa = false, need_pexb = false;
 
@@ -1899,81 +2091,16 @@
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node, *port;
	const struct tegra_pcie_soc *soc = pcie->soc;
-	struct of_pci_range_parser parser;
-	struct of_pci_range range;
	u32 lanes = 0, mask = 0;
	unsigned int lane = 0;
-	struct resource res;
	int err;
-
-	if (of_pci_range_parser_init(&parser, np)) {
-		dev_err(dev, "missing \"ranges\" property\n");
-		return -EINVAL;
-	}
-
-	for_each_of_pci_range(&parser, &range) {
-		err = of_pci_range_to_resource(&range, np, &res);
-		if (err < 0)
-			return err;
-
-		switch (res.flags & IORESOURCE_TYPE_BITS) {
-		case IORESOURCE_IO:
-			/* Track the bus -> CPU I/O mapping offset. */
-			pcie->offset.io = res.start - range.pci_addr;
-
-			memcpy(&pcie->pio, &res, sizeof(res));
-			pcie->pio.name = np->full_name;
-
-			/*
-			 * The Tegra PCIe host bridge uses this to program the
-			 * mapping of the I/O space to the physical address,
-			 * so we override the .start and .end fields here that
-			 * of_pci_range_to_resource() converted to I/O space.
-			 * We also set the IORESOURCE_MEM type to clarify that
-			 * the resource is in the physical memory space.
-			 */
-			pcie->io.start = range.cpu_addr;
-			pcie->io.end = range.cpu_addr + range.size - 1;
-			pcie->io.flags = IORESOURCE_MEM;
-			pcie->io.name = "I/O";
-
-			memcpy(&res, &pcie->io, sizeof(res));
-			break;
-
-		case IORESOURCE_MEM:
-			/*
-			 * Track the bus -> CPU memory mapping offset. This
-			 * assumes that the prefetchable and non-prefetchable
-			 * regions will be the last of type IORESOURCE_MEM in
-			 * the ranges property.
-			 * */
-			pcie->offset.mem = res.start - range.pci_addr;
-
-			if (res.flags & IORESOURCE_PREFETCH) {
-				memcpy(&pcie->prefetch, &res, sizeof(res));
-				pcie->prefetch.name = "prefetchable";
-			} else {
-				memcpy(&pcie->mem, &res, sizeof(res));
-				pcie->mem.name = "non-prefetchable";
-			}
-			break;
-		}
-	}
-
-	err = of_pci_parse_bus_range(np, &pcie->busn);
-	if (err < 0) {
-		dev_err(dev, "failed to parse ranges property: %d\n", err);
-		pcie->busn.name = np->name;
-		pcie->busn.start = 0;
-		pcie->busn.end = 0xff;
-		pcie->busn.flags = IORESOURCE_BUS;
-	}
 
	/* parse root ports */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;
+		char *label;
 
		err = of_pci_get_devfn(port);
		if (err < 0) {
@@ -2033,8 +2160,36 @@
		rp->np = port;
 
		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
-		if (IS_ERR(rp->base))
-			return PTR_ERR(rp->base);
+		if (IS_ERR(rp->base)) {
+			err = PTR_ERR(rp->base);
+			goto err_node_put;
+		}
+
+		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
+		if (!label) {
+			err = -ENOMEM;
+			goto err_node_put;
+		}
+
+		/*
+		 * Returns -ENOENT if reset-gpios property is not populated
+		 * and in this case fall back to using AFI per port register
+		 * to toggle PERST# SFIO line.
+		 */
+		rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
							     "reset-gpios", 0,
							     GPIOD_OUT_LOW,
							     label);
+		if (IS_ERR(rp->reset_gpio)) {
+			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+				rp->reset_gpio = NULL;
+			} else {
+				dev_err(dev, "failed to get reset GPIO: %ld\n",
					PTR_ERR(rp->reset_gpio));
				err = PTR_ERR(rp->reset_gpio);
+				goto err_node_put;
+			}
+		}
 
		list_add_tail(&rp->list, &pcie->ports);
	}
@@ -2087,7 +2242,7 @@
	} while (--timeout);
 
	if (!timeout) {
-		dev_err(dev, "link %u down, retrying\n", port->index);
+		dev_dbg(dev, "link %u down, retrying\n", port->index);
		goto retry;
	}
 
@@ -2109,6 +2264,64 @@
	return false;
 }
 
+static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct tegra_pcie_port *port;
+	ktime_t deadline;
+	u32 value;
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		/*
+		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
+		 * is not supported by Tegra. tegra_pcie_change_link_speed()
+		 * is called only for Tegra chips which support Gen2.
+		 * So there no harm if supported link speed is not verified.
+		 */
+		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
+		value &= ~PCI_EXP_LNKSTA_CLS;
+		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
+		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
+
+		/*
+		 * Poll until link comes back from recovery to avoid race
+		 * condition.
+		 */
+		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
+
+		while (ktime_before(ktime_get(), deadline)) {
+			value = readl(port->base + RP_LINK_CONTROL_STATUS);
+			if ((value & PCI_EXP_LNKSTA_LT) == 0)
+				break;
+
+			usleep_range(2000, 3000);
+		}
+
+		if (value & PCI_EXP_LNKSTA_LT)
+			dev_warn(dev, "PCIe port %u link is in recovery\n",
+				 port->index);
+
+		/* Retrain the link */
+		value = readl(port->base + RP_LINK_CONTROL_STATUS);
+		value |= PCI_EXP_LNKCTL_RL;
+		writel(value, port->base + RP_LINK_CONTROL_STATUS);
+
+		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
+
+		while (ktime_before(ktime_get(), deadline)) {
+			value = readl(port->base + RP_LINK_CONTROL_STATUS);
+			if ((value & PCI_EXP_LNKSTA_LT) == 0)
+				break;
+
+			usleep_range(2000, 3000);
+		}
+
+		if (value & PCI_EXP_LNKSTA_LT)
+			dev_err(dev, "failed to retrain link of port %u\n",
+				port->index);
+	}
+}
+
 static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
 {
	struct device *dev = pcie->dev;
@@ -2119,7 +2332,12 @@
			 port->index, port->lanes);
 
		tegra_pcie_port_enable(port);
+	}
 
+	/* Start LTSSM from Tegra side */
+	reset_control_deassert(pcie->pcie_xrst);
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		if (tegra_pcie_port_check_link(port))
			continue;
 
@@ -2128,11 +2346,16 @@
		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}
+
+	if (pcie->soc->has_gen2)
+		tegra_pcie_change_link_speed(pcie);
 }
 
 static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
 {
	struct tegra_pcie_port *port, *tmp;
+
+	reset_control_assert(pcie->pcie_xrst);
 
	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		tegra_pcie_port_disable(port);
@@ -2157,6 +2380,11 @@
	.has_gen2 = false,
	.force_pca_enable = false,
	.program_uphy = true,
+	.update_clamp_threshold = false,
+	.program_deskew_time = false,
+	.update_fc_timer = false,
+	.has_cache_bars = true,
+	.ectl.enable = false,
 };
 
 static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
@@ -2169,6 +2397,7 @@
	.num_ports = 3,
	.ports = tegra30_pcie_ports,
	.msi_base_shift = 8,
+	.afi_pex2_ctrl = 0x128,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
@@ -2180,6 +2409,11 @@
	.has_gen2 = false,
	.force_pca_enable = false,
	.program_uphy = true,
+	.update_clamp_threshold = false,
+	.program_deskew_time = false,
+	.update_fc_timer = false,
+	.has_cache_bars = false,
+	.ectl.enable = false,
 };
 
 static const struct tegra_pcie_soc tegra124_pcie = {
@@ -2196,6 +2430,11 @@
	.has_gen2 = true,
	.force_pca_enable = false,
	.program_uphy = true,
+	.update_clamp_threshold = true,
+	.program_deskew_time = false,
+	.update_fc_timer = false,
+	.has_cache_bars = false,
+	.ectl.enable = false,
 };
 
 static const struct tegra_pcie_soc tegra210_pcie = {
@@ -2205,6 +2444,8 @@
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x90b890b8,
+	/* FC threshold is bit[25:18] */
+	.update_fc_threshold = 0x01800000,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
@@ -2212,6 +2453,23 @@
	.has_gen2 = true,
	.force_pca_enable = true,
	.program_uphy = true,
+	.update_clamp_threshold = true,
+	.program_deskew_time = true,
+	.update_fc_timer = true,
+	.has_cache_bars = false,
+	.ectl = {
+		.regs = {
+			.rp_ectl_2_r1 = 0x0000000f,
+			.rp_ectl_4_r1 = 0x00000067,
+			.rp_ectl_5_r1 = 0x55010000,
+			.rp_ectl_6_r1 = 0x00000001,
+			.rp_ectl_2_r2 = 0x0000008f,
+			.rp_ectl_4_r2 = 0x000000c7,
+			.rp_ectl_5_r2 = 0x55010000,
+			.rp_ectl_6_r2 = 0x00000001,
+		},
+		.enable = true,
+	},
 };
 
 static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
@@ -2224,6 +2482,7 @@
	.num_ports = 3,
	.ports = tegra186_pcie_ports,
	.msi_base_shift = 8,
+	.afi_pex2_ctrl = 0x19c,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x80b880b8,
@@ -2235,6 +2494,11 @@
	.has_gen2 = true,
	.force_pca_enable = false,
	.program_uphy = false,
+	.update_clamp_threshold = false,
+	.program_deskew_time = false,
+	.update_fc_timer = false,
+	.has_cache_bars = false,
+	.ectl.enable = false,
 };
 
 static const struct of_device_id tegra_pcie_of_match[] = {
@@ -2304,36 +2568,14 @@
	return 0;
 }
 
-static const struct seq_operations tegra_pcie_ports_seq_ops = {
+static const struct seq_operations tegra_pcie_ports_sops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
 };
 
-static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
-{
-	struct tegra_pcie *pcie = inode->i_private;
-	struct seq_file *s;
-	int err;
-
-	err = seq_open(file, &tegra_pcie_ports_seq_ops);
-	if (err)
-		return err;
-
-	s = file->private_data;
-	s->private = pcie;
-
-	return 0;
-}
-
-static const struct file_operations tegra_pcie_ports_ops = {
-	.owner = THIS_MODULE,
-	.open = tegra_pcie_ports_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
 
 static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
 {
@@ -2341,24 +2583,12 @@
	pcie->debugfs = NULL;
 }
 
-static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
+static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
 {
-	struct dentry *file;
-
	pcie->debugfs = debugfs_create_dir("pcie", NULL);
-	if (!pcie->debugfs)
-		return -ENOMEM;
 
-	file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
-				   pcie, &tegra_pcie_ports_ops);
-	if (!file)
-		goto remove;
-
-	return 0;
-
-remove:
-	tegra_pcie_debugfs_exit(pcie);
-	return -ENOMEM;
+	debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie,
			    &tegra_pcie_ports_fops);
 }
 
 static int tegra_pcie_probe(struct platform_device *pdev)
@@ -2366,7 +2596,6 @@
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *host;
	struct tegra_pcie *pcie;
-	struct pci_bus *child;
	int err;
 
	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
@@ -2404,40 +2633,20 @@
		goto pm_runtime_put;
	}
 
-	err = tegra_pcie_request_resources(pcie);
-	if (err)
-		goto pm_runtime_put;
-
-	host->busnr = pcie->busn.start;
-	host->dev.parent = &pdev->dev;
	host->ops = &tegra_pcie_ops;
	host->map_irq = tegra_pcie_map_irq;
-	host->swizzle_irq = pci_common_swizzle;
 
-	err = pci_scan_root_bus_bridge(host);
+	err = pci_host_probe(host);
	if (err < 0) {
		dev_err(dev, "failed to register host: %d\n", err);
-		goto free_resources;
+		goto pm_runtime_put;
	}
 
-	pci_bus_size_bridges(host->bus);
-	pci_bus_assign_resources(host->bus);
-
-	list_for_each_entry(child, &host->bus->children, node)
-		pcie_bus_configure_settings(child);
-
-	pci_bus_add_devices(host->bus);
-
-	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
-		err = tegra_pcie_debugfs_init(pcie);
-		if (err < 0)
-			dev_err(dev, "failed to setup debugfs: %d\n", err);
-	}
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		tegra_pcie_debugfs_init(pcie);
 
	return 0;
 
-free_resources:
-	tegra_pcie_free_resources(pcie);
 pm_runtime_put:
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
@@ -2458,7 +2667,6 @@
 
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
-	tegra_pcie_free_resources(pcie);
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
 
@@ -2477,16 +2685,32 @@
 {
	struct tegra_pcie *pcie = dev_get_drvdata(dev);
	struct tegra_pcie_port *port;
+	int err;
 
	list_for_each_entry(port, &pcie->ports, list)
		tegra_pcie_pme_turnoff(port);
 
	tegra_pcie_disable_ports(pcie);
 
+	/*
+	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
+	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
+	 */
+	tegra_pcie_disable_interrupts(pcie);
+
+	if (pcie->soc->program_uphy) {
+		err = tegra_pcie_phy_power_off(pcie);
+		if (err < 0)
+			dev_err(dev, "failed to power off PHY(s): %d\n", err);
+	}
+
+	reset_control_assert(pcie->pex_rst);
+	clk_disable_unprepare(pcie->pex_clk);
+
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
 
-	tegra_pcie_disable_controller(pcie);
+	pinctrl_pm_select_idle_state(dev);
	tegra_pcie_power_off(pcie);
 
	return 0;
@@ -2502,20 +2726,45 @@
		dev_err(dev, "tegra pcie power on fail: %d\n", err);
		return err;
	}
-	err = tegra_pcie_enable_controller(pcie);
-	if (err) {
-		dev_err(dev, "tegra pcie controller enable fail: %d\n", err);
+
+	err = pinctrl_pm_select_default_state(dev);
+	if (err < 0) {
+		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
		goto poweroff;
	}
+
+	tegra_pcie_enable_controller(pcie);
	tegra_pcie_setup_translations(pcie);
 
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi(pcie);
 
+	err = clk_prepare_enable(pcie->pex_clk);
+	if (err) {
+		dev_err(dev, "failed to enable PEX clock: %d\n", err);
+		goto pex_dpd_enable;
+	}
+
+	reset_control_deassert(pcie->pex_rst);
+
+	if (pcie->soc->program_uphy) {
+		err = tegra_pcie_phy_power_on(pcie);
+		if (err < 0) {
+			dev_err(dev, "failed to power on PHY(s): %d\n", err);
+			goto disable_pex_clk;
+		}
+	}
+
+	tegra_pcie_apply_pad_settings(pcie);
	tegra_pcie_enable_ports(pcie);
 
	return 0;
 
+disable_pex_clk:
+	reset_control_assert(pcie->pex_rst);
+	clk_disable_unprepare(pcie->pex_clk);
+pex_dpd_enable:
+	pinctrl_pm_select_idle_state(dev);
 poweroff:
	tegra_pcie_power_off(pcie);
 