| .. | .. |
|---|
| 8 | 8 | * Author: Simon Xue <xxm@rock-chips.com> |
|---|
| 9 | 9 | */ |
|---|
| 10 | 10 | |
|---|
| 11 | +#include <dt-bindings/phy/phy.h> |
|---|
| 11 | 12 | #include <linux/clk.h> |
|---|
| 12 | 13 | #include <linux/delay.h> |
|---|
| 13 | 14 | #include <linux/fs.h> |
|---|
| 14 | 15 | #include <linux/gpio.h> |
|---|
| 15 | 16 | #include <linux/init.h> |
|---|
| 16 | 17 | #include <linux/interrupt.h> |
|---|
| 18 | +#include <linux/iopoll.h> |
|---|
| 17 | 19 | #include <linux/irq.h> |
|---|
| 18 | 20 | #include <linux/irqchip/chained_irq.h> |
|---|
| 19 | 21 | #include <linux/irqdomain.h> |
|---|
| .. | .. |
|---|
| 29 | 31 | #include <linux/of_pci.h> |
|---|
| 30 | 32 | #include <linux/pci.h> |
|---|
| 31 | 33 | #include <linux/phy/phy.h> |
|---|
| 34 | +#include <linux/phy/pcie.h> |
|---|
| 32 | 35 | #include <linux/platform_device.h> |
|---|
| 33 | 36 | #include <linux/poll.h> |
|---|
| 34 | 37 | #include <linux/regmap.h> |
|---|
| 35 | 38 | #include <linux/reset.h> |
|---|
| 36 | 39 | #include <linux/resource.h> |
|---|
| 40 | +#include <linux/rfkill-wlan.h> |
|---|
| 37 | 41 | #include <linux/signal.h> |
|---|
| 38 | 42 | #include <linux/types.h> |
|---|
| 39 | 43 | #include <linux/uaccess.h> |
|---|
| .. | .. |
|---|
| 49 | 53 | RK_PCIE_RC_TYPE, |
|---|
| 50 | 54 | }; |
|---|
| 51 | 55 | |
|---|
| 52 | | -struct reset_bulk_data { |
|---|
| 53 | | - const char *id; |
|---|
| 54 | | - struct reset_control *rst; |
|---|
| 55 | | -}; |
|---|
| 56 | +#define RK_PCIE_DBG 0 |
|---|
| 56 | 57 | |
|---|
| 57 | 58 | #define PCIE_DMA_OFFSET 0x380000 |
|---|
| 58 | 59 | |
|---|
| 60 | +#define PCIE_DMA_CTRL_OFF 0x8 |
|---|
| 59 | 61 | #define PCIE_DMA_WR_ENB 0xc |
|---|
| 60 | 62 | #define PCIE_DMA_WR_CTRL_LO 0x200 |
|---|
| 61 | 63 | #define PCIE_DMA_WR_CTRL_HI 0x204 |
|---|
| .. | .. |
|---|
| 99 | 101 | |
|---|
| 100 | 102 | #define PCIE_CAP_LINK_CONTROL2_LINK_STATUS 0xa0 |
|---|
| 101 | 103 | |
|---|
| 104 | +#define PCIE_CLIENT_INTR_STATUS_MSG_RX 0x04 |
|---|
| 105 | +#define PME_TO_ACK (BIT(9) | BIT(25)) |
|---|
| 102 | 106 | #define PCIE_CLIENT_INTR_STATUS_LEGACY 0x08 |
|---|
| 103 | 107 | #define PCIE_CLIENT_INTR_STATUS_MISC 0x10 |
|---|
| 104 | 108 | #define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c |
|---|
| .. | .. |
|---|
| 106 | 110 | #define MASK_LEGACY_INT(x) (0x00110011 << x) |
|---|
| 107 | 111 | #define UNMASK_LEGACY_INT(x) (0x00110000 << x) |
|---|
| 108 | 112 | #define PCIE_CLIENT_INTR_MASK 0x24 |
|---|
| 113 | +#define PCIE_CLIENT_POWER 0x2c |
|---|
| 114 | +#define READY_ENTER_L23 BIT(3) |
|---|
| 115 | +#define PCIE_CLIENT_MSG_GEN 0x34 |
|---|
| 116 | +#define PME_TURN_OFF (BIT(4) | BIT(20)) |
|---|
| 109 | 117 | #define PCIE_CLIENT_GENERAL_DEBUG 0x104 |
|---|
| 110 | 118 | #define PCIE_CLIENT_HOT_RESET_CTRL 0x180 |
|---|
| 119 | +#define PCIE_LTSSM_APP_DLY1_EN BIT(0) |
|---|
| 120 | +#define PCIE_LTSSM_APP_DLY2_EN BIT(1) |
|---|
| 121 | +#define PCIE_LTSSM_APP_DLY1_DONE BIT(2) |
|---|
| 122 | +#define PCIE_LTSSM_APP_DLY2_DONE BIT(3) |
|---|
| 111 | 123 | #define PCIE_LTSSM_ENABLE_ENHANCE BIT(4) |
|---|
| 112 | 124 | #define PCIE_CLIENT_LTSSM_STATUS 0x300 |
|---|
| 113 | 125 | #define SMLH_LINKUP BIT(16) |
|---|
| 114 | 126 | #define RDLH_LINKUP BIT(17) |
|---|
| 127 | +#define PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN 0x154 |
|---|
| 115 | 128 | #define PCIE_CLIENT_DBG_FIFO_MODE_CON 0x310 |
|---|
| 116 | 129 | #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0 0x320 |
|---|
| 117 | 130 | #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1 0x324 |
|---|
| .. | .. |
|---|
| 119 | 132 | #define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1 0x32c |
|---|
| 120 | 133 | #define PCIE_CLIENT_DBG_FIFO_STATUS 0x350 |
|---|
| 121 | 134 | #define PCIE_CLIENT_DBG_TRANSITION_DATA 0xffff0000 |
|---|
| 122 | | -#define PCIE_CLIENT_DBF_EN 0xffff0003 |
|---|
| 123 | | -#define RK_PCIE_DBG 0 |
|---|
| 135 | +#define PCIE_CLIENT_DBF_EN 0xffff0007 |
|---|
| 124 | 136 | |
|---|
| 125 | 137 | #define PCIE_PHY_LINKUP BIT(0) |
|---|
| 126 | 138 | #define PCIE_DATA_LINKUP BIT(1) |
|---|
| 127 | 139 | |
|---|
| 128 | | -#define PCIE_RESBAR_CTRL_REG0_REG 0x2a8 |
|---|
| 140 | +#define PCIE_TYPE0_HDR_DBI2_OFFSET 0x100000 |
|---|
| 129 | 141 | #define PCIE_SB_BAR0_MASK_REG 0x100010 |
|---|
| 130 | 142 | |
|---|
| 131 | 143 | #define PCIE_PL_ORDER_RULE_CTRL_OFF 0x8B4 |
|---|
| 144 | +#define RK_PCIE_L2_TMOUT_US 5000 |
|---|
| 145 | +#define RK_PCIE_HOTRESET_TMOUT_US 10000 |
|---|
| 146 | +#define RK_PCIE_ENUM_HW_RETRYIES 2 |
|---|
| 147 | + |
|---|
| 148 | +enum rk_pcie_ltssm_code { |
|---|
| 149 | + S_L0 = 0x11, |
|---|
| 150 | + S_L0S = 0x12, |
|---|
| 151 | + S_L1_IDLE = 0x14, |
|---|
| 152 | + S_L2_IDLE = 0x15, |
|---|
| 153 | + S_MAX = 0x1f, |
|---|
| 154 | +}; |
|---|
| 132 | 155 | |
|---|
| 133 | 156 | struct rk_pcie { |
|---|
| 134 | 157 | struct dw_pcie *pci; |
|---|
| 135 | 158 | enum rk_pcie_device_mode mode; |
|---|
| 136 | 159 | enum phy_mode phy_mode; |
|---|
| 160 | + int phy_sub_mode; |
|---|
| 137 | 161 | unsigned char bar_to_atu[6]; |
|---|
| 138 | 162 | phys_addr_t *outbound_addr; |
|---|
| 139 | 163 | unsigned long *ib_window_map; |
|---|
| .. | .. |
|---|
| 144 | 168 | void __iomem *apb_base; |
|---|
| 145 | 169 | struct phy *phy; |
|---|
| 146 | 170 | struct clk_bulk_data *clks; |
|---|
| 171 | + struct reset_control *rsts; |
|---|
| 147 | 172 | unsigned int clk_cnt; |
|---|
| 148 | | - struct reset_bulk_data *rsts; |
|---|
| 149 | 173 | struct gpio_desc *rst_gpio; |
|---|
| 174 | + u32 perst_inactive_ms; |
|---|
| 175 | + struct gpio_desc *prsnt_gpio; |
|---|
| 150 | 176 | phys_addr_t mem_start; |
|---|
| 151 | 177 | size_t mem_size; |
|---|
| 152 | 178 | struct pcie_port pp; |
|---|
| .. | .. |
|---|
| 154 | 180 | struct regmap *pmu_grf; |
|---|
| 155 | 181 | struct dma_trx_obj *dma_obj; |
|---|
| 156 | 182 | bool in_suspend; |
|---|
| 183 | + bool skip_scan_in_resume; |
|---|
| 157 | 184 | bool is_rk1808; |
|---|
| 158 | 185 | bool is_signal_test; |
|---|
| 159 | 186 | bool bifurcation; |
|---|
| 187 | + bool supports_clkreq; |
|---|
| 160 | 188 | struct regulator *vpcie3v3; |
|---|
| 161 | 189 | struct irq_domain *irq_domain; |
|---|
| 162 | 190 | raw_spinlock_t intx_lock; |
|---|
| 191 | + u16 aspm; |
|---|
| 192 | + u32 l1ss_ctl1; |
|---|
| 163 | 193 | struct dentry *debugfs; |
|---|
| 194 | + u32 msi_vector_num; |
|---|
| 195 | + struct workqueue_struct *hot_rst_wq; |
|---|
| 196 | + struct work_struct hot_rst_work; |
|---|
| 197 | + u32 comp_prst[2]; |
|---|
| 198 | + u32 intx; |
|---|
| 164 | 199 | }; |
|---|
| 165 | 200 | |
|---|
| 166 | 201 | struct rk_pcie_of_data { |
|---|
| 167 | 202 | enum rk_pcie_device_mode mode; |
|---|
| 203 | + u32 msi_vector_num; |
|---|
| 168 | 204 | }; |
|---|
| 169 | 205 | |
|---|
| 170 | 206 | #define to_rk_pcie(x) dev_get_drvdata((x)->dev) |
|---|
| 171 | | -static const struct dev_pm_ops rockchip_dw_pcie_pm_ops; |
|---|
| 207 | +static int rk_pcie_disable_power(struct rk_pcie *rk_pcie); |
|---|
| 208 | +static int rk_pcie_enable_power(struct rk_pcie *rk_pcie); |
|---|
| 172 | 209 | |
|---|
| 173 | 210 | static int rk_pcie_read(void __iomem *addr, int size, u32 *val) |
|---|
| 174 | 211 | { |
|---|
| .. | .. |
|---|
| 253 | 290 | return 0; |
|---|
| 254 | 291 | } |
|---|
| 255 | 292 | |
|---|
| 293 | +static void rk_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val) |
|---|
| 294 | +{ |
|---|
| 295 | + int ret; |
|---|
| 296 | + |
|---|
| 297 | + if (pci->ops->write_dbi) { |
|---|
| 298 | + pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val); |
|---|
| 299 | + return; |
|---|
| 300 | + } |
|---|
| 301 | + |
|---|
| 302 | + ret = dw_pcie_write(pci->atu_base + reg, 4, val); |
|---|
| 303 | + if (ret) |
|---|
| 304 | + dev_err(pci->dev, "Write ATU address failed\n"); |
|---|
| 305 | +} |
|---|
| 306 | + |
|---|
| 307 | +static void rk_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, |
|---|
| 308 | + u32 val) |
|---|
| 309 | +{ |
|---|
| 310 | + u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); |
|---|
| 311 | + |
|---|
| 312 | + rk_pcie_writel_atu(pci, offset + reg, val); |
|---|
| 313 | +} |
|---|
| 314 | + |
|---|
| 315 | +static u32 rk_pcie_readl_atu(struct dw_pcie *pci, u32 reg) |
|---|
| 316 | +{ |
|---|
| 317 | + int ret; |
|---|
| 318 | + u32 val; |
|---|
| 319 | + |
|---|
| 320 | + if (pci->ops->read_dbi) |
|---|
| 321 | + return pci->ops->read_dbi(pci, pci->atu_base, reg, 4); |
|---|
| 322 | + |
|---|
| 323 | + ret = dw_pcie_read(pci->atu_base + reg, 4, &val); |
|---|
| 324 | + if (ret) |
|---|
| 325 | + dev_err(pci->dev, "Read ATU address failed\n"); |
|---|
| 326 | + |
|---|
| 327 | + return val; |
|---|
| 328 | +} |
|---|
| 329 | + |
|---|
| 330 | +static u32 rk_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg) |
|---|
| 331 | +{ |
|---|
| 332 | + u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); |
|---|
| 333 | + |
|---|
| 334 | + return rk_pcie_readl_atu(pci, offset + reg); |
|---|
| 335 | +} |
|---|
| 336 | + |
|---|
| 337 | +static int rk_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no, |
|---|
| 338 | + int index, int bar, u64 cpu_addr, |
|---|
| 339 | + enum dw_pcie_as_type as_type) |
|---|
| 340 | +{ |
|---|
| 341 | + int type; |
|---|
| 342 | + u32 retries, val; |
|---|
| 343 | + |
|---|
| 344 | + rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, |
|---|
| 345 | + lower_32_bits(cpu_addr)); |
|---|
| 346 | + rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, |
|---|
| 347 | + upper_32_bits(cpu_addr)); |
|---|
| 348 | + |
|---|
| 349 | + switch (as_type) { |
|---|
| 350 | + case DW_PCIE_AS_MEM: |
|---|
| 351 | + type = PCIE_ATU_TYPE_MEM; |
|---|
| 352 | + break; |
|---|
| 353 | + case DW_PCIE_AS_IO: |
|---|
| 354 | + type = PCIE_ATU_TYPE_IO; |
|---|
| 355 | + break; |
|---|
| 356 | + default: |
|---|
| 357 | + return -EINVAL; |
|---|
| 358 | + } |
|---|
| 359 | + |
|---|
| 360 | + rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type | |
|---|
| 361 | + PCIE_ATU_FUNC_NUM(func_no)); |
|---|
| 362 | + rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, |
|---|
| 363 | + PCIE_ATU_FUNC_NUM_MATCH_EN | |
|---|
| 364 | + PCIE_ATU_ENABLE | |
|---|
| 365 | + PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); |
|---|
| 366 | + |
|---|
| 367 | + /* |
|---|
| 368 | + * Make sure ATU enable takes effect before any subsequent config |
|---|
| 369 | + * and I/O accesses. |
|---|
| 370 | + */ |
|---|
| 371 | + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { |
|---|
| 372 | + val = rk_pcie_readl_ib_unroll(pci, index, |
|---|
| 373 | + PCIE_ATU_UNR_REGION_CTRL2); |
|---|
| 374 | + if (val & PCIE_ATU_ENABLE) |
|---|
| 375 | + return 0; |
|---|
| 376 | + |
|---|
| 377 | + mdelay(LINK_WAIT_IATU); |
|---|
| 378 | + } |
|---|
| 379 | + dev_err(pci->dev, "Inbound iATU is not being enabled\n"); |
|---|
| 380 | + |
|---|
| 381 | + return -EBUSY; |
|---|
| 382 | +} |
|---|
| 383 | + |
|---|
| 384 | + |
|---|
| 385 | +static int rk_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, |
|---|
| 386 | + int bar, u64 cpu_addr, |
|---|
| 387 | + enum dw_pcie_as_type as_type) |
|---|
| 388 | +{ |
|---|
| 389 | + int type; |
|---|
| 390 | + u32 retries, val; |
|---|
| 391 | + |
|---|
| 392 | + if (pci->iatu_unroll_enabled) |
|---|
| 393 | + return rk_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar, |
|---|
| 394 | + cpu_addr, as_type); |
|---|
| 395 | + |
|---|
| 396 | + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | |
|---|
| 397 | + index); |
|---|
| 398 | + dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr)); |
|---|
| 399 | + dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr)); |
|---|
| 400 | + |
|---|
| 401 | + switch (as_type) { |
|---|
| 402 | + case DW_PCIE_AS_MEM: |
|---|
| 403 | + type = PCIE_ATU_TYPE_MEM; |
|---|
| 404 | + break; |
|---|
| 405 | + case DW_PCIE_AS_IO: |
|---|
| 406 | + type = PCIE_ATU_TYPE_IO; |
|---|
| 407 | + break; |
|---|
| 408 | + default: |
|---|
| 409 | + return -EINVAL; |
|---|
| 410 | + } |
|---|
| 411 | + |
|---|
| 412 | + dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | |
|---|
| 413 | + PCIE_ATU_FUNC_NUM(func_no)); |
|---|
| 414 | + dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE | |
|---|
| 415 | + PCIE_ATU_FUNC_NUM_MATCH_EN | |
|---|
| 416 | + PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); |
|---|
| 417 | + |
|---|
| 418 | + /* |
|---|
| 419 | + * Make sure ATU enable takes effect before any subsequent config |
|---|
| 420 | + * and I/O accesses. |
|---|
| 421 | + */ |
|---|
| 422 | + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { |
|---|
| 423 | + val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); |
|---|
| 424 | + if (val & PCIE_ATU_ENABLE) |
|---|
| 425 | + return 0; |
|---|
| 426 | + |
|---|
| 427 | + mdelay(LINK_WAIT_IATU); |
|---|
| 428 | + } |
|---|
| 429 | + dev_err(pci->dev, "Inbound iATU is not being enabled\n"); |
|---|
| 430 | + |
|---|
| 431 | + return -EBUSY; |
|---|
| 432 | +} |
|---|
| 433 | + |
|---|
| 256 | 434 | static int rk_pcie_ep_inbound_atu(struct rk_pcie *rk_pcie, |
|---|
| 257 | 435 | enum pci_barno bar, dma_addr_t cpu_addr, |
|---|
| 258 | 436 | enum dw_pcie_as_type as_type) |
|---|
| 259 | 437 | { |
|---|
| 260 | 438 | int ret; |
|---|
| 261 | 439 | u32 free_win; |
|---|
| 440 | + u8 func_no = 0x0; |
|---|
| 262 | 441 | |
|---|
| 263 | 442 | if (rk_pcie->in_suspend) { |
|---|
| 264 | 443 | free_win = rk_pcie->bar_to_atu[bar]; |
|---|
| .. | .. |
|---|
| 271 | 450 | } |
|---|
| 272 | 451 | } |
|---|
| 273 | 452 | |
|---|
| 274 | | - ret = dw_pcie_prog_inbound_atu(rk_pcie->pci, free_win, bar, cpu_addr, |
|---|
| 275 | | - as_type); |
|---|
| 453 | + ret = rk_pcie_prog_inbound_atu(rk_pcie->pci, func_no, free_win, bar, |
|---|
| 454 | + cpu_addr, as_type); |
|---|
| 276 | 455 | if (ret < 0) { |
|---|
| 277 | 456 | dev_err(rk_pcie->pci->dev, "Failed to program IB window\n"); |
|---|
| 278 | 457 | return ret; |
|---|
| .. | .. |
|---|
| 285 | 464 | set_bit(free_win, rk_pcie->ib_window_map); |
|---|
| 286 | 465 | |
|---|
| 287 | 466 | return 0; |
|---|
| 467 | +} |
|---|
| 468 | + |
|---|
| 469 | +static void rk_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, |
|---|
| 470 | + u32 val) |
|---|
| 471 | +{ |
|---|
| 472 | + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); |
|---|
| 473 | + |
|---|
| 474 | + rk_pcie_writel_atu(pci, offset + reg, val); |
|---|
| 475 | +} |
|---|
| 476 | + |
|---|
| 477 | +static u32 rk_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) |
|---|
| 478 | +{ |
|---|
| 479 | + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); |
|---|
| 480 | + |
|---|
| 481 | + return rk_pcie_readl_atu(pci, offset + reg); |
|---|
| 482 | +} |
|---|
| 483 | + |
|---|
| 484 | +static void rk_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no, |
|---|
| 485 | + int index, int type, |
|---|
| 486 | + u64 cpu_addr, u64 pci_addr, |
|---|
| 487 | + u32 size) |
|---|
| 488 | +{ |
|---|
| 489 | + u32 retries, val; |
|---|
| 490 | + u64 limit_addr = cpu_addr + size - 1; |
|---|
| 491 | + |
|---|
| 492 | + rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, |
|---|
| 493 | + lower_32_bits(cpu_addr)); |
|---|
| 494 | + rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, |
|---|
| 495 | + upper_32_bits(cpu_addr)); |
|---|
| 496 | + rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT, |
|---|
| 497 | + lower_32_bits(limit_addr)); |
|---|
| 498 | + rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT, |
|---|
| 499 | + upper_32_bits(limit_addr)); |
|---|
| 500 | + rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, |
|---|
| 501 | + lower_32_bits(pci_addr)); |
|---|
| 502 | + rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, |
|---|
| 503 | + upper_32_bits(pci_addr)); |
|---|
| 504 | + rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, |
|---|
| 505 | + type | PCIE_ATU_FUNC_NUM(func_no)); |
|---|
| 506 | + rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, |
|---|
| 507 | + PCIE_ATU_ENABLE); |
|---|
| 508 | + |
|---|
| 509 | + /* |
|---|
| 510 | + * Make sure ATU enable takes effect before any subsequent config |
|---|
| 511 | + * and I/O accesses. |
|---|
| 512 | + */ |
|---|
| 513 | + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { |
|---|
| 514 | + val = rk_pcie_readl_ob_unroll(pci, index, |
|---|
| 515 | + PCIE_ATU_UNR_REGION_CTRL2); |
|---|
| 516 | + if (val & PCIE_ATU_ENABLE) |
|---|
| 517 | + return; |
|---|
| 518 | + |
|---|
| 519 | + mdelay(LINK_WAIT_IATU); |
|---|
| 520 | + } |
|---|
| 521 | + dev_err(pci->dev, "Outbound iATU is not being enabled\n"); |
|---|
| 522 | +} |
|---|
| 523 | + |
|---|
| 524 | +static void rk_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, |
|---|
| 525 | + int type, u64 cpu_addr, u64 pci_addr, u32 size) |
|---|
| 526 | +{ |
|---|
| 527 | + u32 retries, val; |
|---|
| 528 | + |
|---|
| 529 | + if (pci->ops->cpu_addr_fixup) |
|---|
| 530 | + cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); |
|---|
| 531 | + |
|---|
| 532 | + if (pci->iatu_unroll_enabled) { |
|---|
| 533 | + rk_pcie_prog_outbound_atu_unroll(pci, 0x0, index, type, |
|---|
| 534 | + cpu_addr, pci_addr, size); |
|---|
| 535 | + return; |
|---|
| 536 | + } |
|---|
| 537 | + |
|---|
| 538 | + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, |
|---|
| 539 | + PCIE_ATU_REGION_OUTBOUND | index); |
|---|
| 540 | + dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, |
|---|
| 541 | + lower_32_bits(cpu_addr)); |
|---|
| 542 | + dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, |
|---|
| 543 | + upper_32_bits(cpu_addr)); |
|---|
| 544 | + dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, |
|---|
| 545 | + lower_32_bits(cpu_addr + size - 1)); |
|---|
| 546 | + dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, |
|---|
| 547 | + lower_32_bits(pci_addr)); |
|---|
| 548 | + dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, |
|---|
| 549 | + upper_32_bits(pci_addr)); |
|---|
| 550 | + dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | |
|---|
| 551 | + PCIE_ATU_FUNC_NUM(0x0)); |
|---|
| 552 | + dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE); |
|---|
| 553 | + |
|---|
| 554 | + /* |
|---|
| 555 | + * Make sure ATU enable takes effect before any subsequent config |
|---|
| 556 | + * and I/O accesses. |
|---|
| 557 | + */ |
|---|
| 558 | + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { |
|---|
| 559 | + val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); |
|---|
| 560 | + if (val & PCIE_ATU_ENABLE) |
|---|
| 561 | + return; |
|---|
| 562 | + |
|---|
| 563 | + mdelay(LINK_WAIT_IATU); |
|---|
| 564 | + } |
|---|
| 565 | + dev_err(pci->dev, "Outbound iATU is not being enabled\n"); |
|---|
| 288 | 566 | } |
|---|
| 289 | 567 | |
|---|
| 290 | 568 | static int rk_pcie_ep_outbound_atu(struct rk_pcie *rk_pcie, |
|---|
| .. | .. |
|---|
| 305 | 583 | } |
|---|
| 306 | 584 | } |
|---|
| 307 | 585 | |
|---|
| 308 | | - dw_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM, |
|---|
| 586 | + rk_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM, |
|---|
| 309 | 587 | phys_addr, pci_addr, size); |
|---|
| 310 | 588 | |
|---|
| 311 | 589 | if (rk_pcie->in_suspend) |
|---|
| .. | .. |
|---|
| 362 | 640 | return 0; |
|---|
| 363 | 641 | } |
|---|
| 364 | 642 | |
|---|
| 643 | +#if defined(CONFIG_PCIEASPM) |
|---|
| 644 | +static void disable_aspm_l1ss(struct rk_pcie *rk_pcie) |
|---|
| 645 | +{ |
|---|
| 646 | + u32 val, cfg_link_cap_l1sub; |
|---|
| 647 | + |
|---|
| 648 | + val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_L1SS); |
|---|
| 649 | + if (!val) { |
|---|
| 650 | + dev_err(rk_pcie->pci->dev, "can't find l1ss cap\n"); |
|---|
| 651 | + |
|---|
| 652 | + return; |
|---|
| 653 | + } |
|---|
| 654 | + |
|---|
| 655 | + cfg_link_cap_l1sub = val + PCI_L1SS_CAP; |
|---|
| 656 | + |
|---|
| 657 | + val = dw_pcie_readl_dbi(rk_pcie->pci, cfg_link_cap_l1sub); |
|---|
| 658 | + val &= ~(PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2 | PCI_L1SS_CAP_L1_PM_SS); |
|---|
| 659 | + dw_pcie_writel_dbi(rk_pcie->pci, cfg_link_cap_l1sub, val); |
|---|
| 660 | +} |
|---|
| 661 | +#else |
|---|
| 662 | +static inline void disable_aspm_l1ss(struct rk_pcie *rk_pcie) { return; } |
|---|
| 663 | +#endif |
|---|
| 664 | + |
|---|
| 365 | 665 | static inline void rk_pcie_set_mode(struct rk_pcie *rk_pcie) |
|---|
| 366 | 666 | { |
|---|
| 367 | 667 | switch (rk_pcie->mode) { |
|---|
| .. | .. |
|---|
| 369 | 669 | rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00000); |
|---|
| 370 | 670 | break; |
|---|
| 371 | 671 | case RK_PCIE_RC_TYPE: |
|---|
| 672 | + if (rk_pcie->supports_clkreq) { |
|---|
| 673 | + /* Application is ready to have reference clock removed */ |
|---|
| 674 | + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x00010001); |
|---|
| 675 | + } else { |
|---|
| 676 | + /* Pull down CLKREQ# to assert the connecting CLOCK_GEN OE */ |
|---|
| 677 | + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x30011000); |
|---|
| 678 | + disable_aspm_l1ss(rk_pcie); |
|---|
| 679 | + } |
|---|
| 372 | 680 | rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00040); |
|---|
| 373 | 681 | /* |
|---|
| 374 | 682 | * Disable order rule for CPL can't pass halted P queue. |
|---|
| .. | .. |
|---|
| 398 | 706 | rk_pcie_writel_apb(rk_pcie, 0x0, 0xC000C); |
|---|
| 399 | 707 | } |
|---|
| 400 | 708 | |
|---|
| 401 | | -static int rk_pcie_link_up(struct dw_pcie *pci) |
|---|
| 402 | | -{ |
|---|
| 403 | | - struct rk_pcie *rk_pcie = to_rk_pcie(pci); |
|---|
| 404 | | - u32 val; |
|---|
| 405 | | - |
|---|
| 406 | | - if (rk_pcie->is_rk1808) { |
|---|
| 407 | | - val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG); |
|---|
| 408 | | - if ((val & (PCIE_PHY_LINKUP | PCIE_DATA_LINKUP)) == 0x3 && |
|---|
| 409 | | - ((val & GENMASK(15, 10)) >> 10) == 0x11) |
|---|
| 410 | | - return 1; |
|---|
| 411 | | - } else { |
|---|
| 412 | | - val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS); |
|---|
| 413 | | - if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == 0x30000 && |
|---|
| 414 | | - (val & GENMASK(5, 0)) == 0x11) |
|---|
| 415 | | - return 1; |
|---|
| 416 | | - } |
|---|
| 417 | | - |
|---|
| 418 | | - return 0; |
|---|
| 419 | | -} |
|---|
| 420 | | - |
|---|
| 421 | 709 | static void rk_pcie_enable_debug(struct rk_pcie *rk_pcie) |
|---|
| 422 | 710 | { |
|---|
| 423 | | -#if RK_PCIE_DBG |
|---|
| 711 | + if (!IS_ENABLED(CONFIG_DEBUG_FS)) |
|---|
| 712 | + return; |
|---|
| 424 | 713 | if (rk_pcie->is_rk1808 == true) |
|---|
| 425 | 714 | return; |
|---|
| 426 | 715 | |
|---|
| .. | .. |
|---|
| 434 | 723 | PCIE_CLIENT_DBG_TRANSITION_DATA); |
|---|
| 435 | 724 | rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_MODE_CON, |
|---|
| 436 | 725 | PCIE_CLIENT_DBF_EN); |
|---|
| 437 | | -#endif |
|---|
| 438 | 726 | } |
|---|
| 439 | 727 | |
|---|
| 440 | 728 | static void rk_pcie_debug_dump(struct rk_pcie *rk_pcie) |
|---|
| .. | .. |
|---|
| 453 | 741 | |
|---|
| 454 | 742 | static int rk_pcie_establish_link(struct dw_pcie *pci) |
|---|
| 455 | 743 | { |
|---|
| 456 | | - int retries; |
|---|
| 744 | + int retries, power; |
|---|
| 457 | 745 | struct rk_pcie *rk_pcie = to_rk_pcie(pci); |
|---|
| 746 | + bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj; |
|---|
| 747 | + int hw_retries = 0; |
|---|
| 748 | + u32 ltssm; |
|---|
| 458 | 749 | |
|---|
| 459 | | - if (dw_pcie_link_up(pci)) { |
|---|
| 750 | + /* |
|---|
| 751 | + * For standard RC, even if the link has been setup by firmware, |
|---|
| 752 | + * we still need to reset link as we need to remove all resource info |
|---|
| 753 | + * from devices, for instance BAR, as it wasn't assigned by kernel. |
|---|
| 754 | + */ |
|---|
| 755 | + if (dw_pcie_link_up(pci) && !std_rc) { |
|---|
| 460 | 756 | dev_err(pci->dev, "link is already up\n"); |
|---|
| 461 | 757 | return 0; |
|---|
| 462 | 758 | } |
|---|
| 463 | 759 | |
|---|
| 464 | | - rk_pcie_disable_ltssm(rk_pcie); |
|---|
| 465 | | - rk_pcie_link_status_clear(rk_pcie); |
|---|
| 466 | | - rk_pcie_enable_debug(rk_pcie); |
|---|
| 760 | + for (hw_retries = 0; hw_retries < RK_PCIE_ENUM_HW_RETRYIES; hw_retries++) { |
|---|
| 761 | + /* Reset the device */ |
|---|
| 762 | + gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0); |
|---|
| 467 | 763 | |
|---|
| 468 | | - /* Enable client reset or link down interrupt */ |
|---|
| 469 | | - rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0x40000); |
|---|
| 764 | + rk_pcie_disable_ltssm(rk_pcie); |
|---|
| 765 | + rk_pcie_link_status_clear(rk_pcie); |
|---|
| 766 | + rk_pcie_enable_debug(rk_pcie); |
|---|
| 470 | 767 | |
|---|
| 471 | | - /* Enable LTSSM */ |
|---|
| 472 | | - rk_pcie_enable_ltssm(rk_pcie); |
|---|
| 768 | + /* Enable client reset or link down interrupt */ |
|---|
| 769 | + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0x40000); |
|---|
| 473 | 770 | |
|---|
| 474 | | - /* |
|---|
| 475 | | - * PCIe requires the refclk to be stable for 100µs prior to releasing |
|---|
| 476 | | - * PERST and T_PVPERL (Power stable to PERST# inactive) should be a |
|---|
| 477 | | - * minimum of 100ms. See table 2-4 in section 2.6.2 AC, the PCI Express |
|---|
| 478 | | - * Card Electromechanical Specification 3.0. So 100ms in total is the min |
|---|
| 479 | | - * requuirement here. We add a 1s for sake of hoping everthings work fine. |
|---|
| 480 | | - */ |
|---|
| 481 | | - msleep(1000); |
|---|
| 482 | | - gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1); |
|---|
| 771 | + /* Enable LTSSM */ |
|---|
| 772 | + rk_pcie_enable_ltssm(rk_pcie); |
|---|
| 483 | 773 | |
|---|
| 484 | | - for (retries = 0; retries < 10; retries++) { |
|---|
| 485 | | - if (dw_pcie_link_up(pci)) { |
|---|
| 486 | | - /* |
|---|
| 487 | | - * We may be here in case of L0 in Gen1. But if EP is capable |
|---|
| 488 | | - * of Gen2 or Gen3, Gen switch may happen just in this time, but |
|---|
| 489 | | - * we keep on accessing devices in unstable link status. Given |
|---|
| 490 | | - * that LTSSM max timeout is 24ms per period, we can wait a bit |
|---|
| 491 | | - * more for Gen switch. |
|---|
| 492 | | - */ |
|---|
| 493 | | - msleep(100); |
|---|
| 494 | | - dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n", |
|---|
| 495 | | - rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS)); |
|---|
| 496 | | - rk_pcie_debug_dump(rk_pcie); |
|---|
| 497 | | - return 0; |
|---|
| 774 | + /* |
|---|
| 775 | + * In resume routine, function devices' resume function must be late after |
|---|
| 776 | + * controllers'. Some devices, such as Wi-Fi, need special IO setting before |
|---|
| 777 | + * finishing training. So there must be timeout here. These kinds of devices |
|---|
| 778 | + * need to rescan devices by their drivers when used. So no need to waste time waiting |
|---|
| 779 | + * for training pass. |
|---|
| 780 | + */ |
|---|
| 781 | + if (rk_pcie->in_suspend && rk_pcie->skip_scan_in_resume) { |
|---|
| 782 | + rfkill_get_wifi_power_state(&power); |
|---|
| 783 | + if (!power) { |
|---|
| 784 | + gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1); |
|---|
| 785 | + return 0; |
|---|
| 786 | + } |
|---|
| 498 | 787 | } |
|---|
| 499 | 788 | |
|---|
| 500 | | - dev_info_ratelimited(pci->dev, "PCIe Linking... LTSSM is 0x%x\n", |
|---|
| 501 | | - rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS)); |
|---|
| 502 | | - rk_pcie_debug_dump(rk_pcie); |
|---|
| 503 | | - msleep(1000); |
|---|
| 504 | | - } |
|---|
| 789 | + /* |
|---|
| 790 | + * PCIe requires the refclk to be stable for 100µs prior to releasing |
|---|
| 791 | + * PERST and T_PVPERL (Power stable to PERST# inactive) should be a |
|---|
| 792 | + * minimum of 100ms. See table 2-4 in section 2.6.2 AC, the PCI Express |
|---|
| 793 | + * Card Electromechanical Specification 3.0. So 100ms in total is the min |
|---|
| 794 | + * requirement here. We add a 200ms by default in the hope that everything |
|---|
| 795 | + * works fine. If it doesn't, please add more in the DT node via rockchip,perst-inactive-ms. |
|---|
| 796 | + */ |
|---|
| 797 | + msleep(rk_pcie->perst_inactive_ms); |
|---|
| 798 | + gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1); |
|---|
| 505 | 799 | |
|---|
| 506 | | - dev_err(pci->dev, "PCIe Link Fail\n"); |
|---|
| 800 | + /* |
|---|
| 801 | + * Add this 1ms delay because we observe link is always up stably after it and |
|---|
| 802 | + * could help us save 20ms for scanning devices. |
|---|
| 803 | + */ |
|---|
| 804 | + usleep_range(1000, 1100); |
|---|
| 805 | + |
|---|
| 806 | + for (retries = 0; retries < 100; retries++) { |
|---|
| 807 | + if (dw_pcie_link_up(pci)) { |
|---|
| 808 | + /* |
|---|
| 809 | + * We may be here in case of L0 in Gen1. But if EP is capable |
|---|
| 810 | + * of Gen2 or Gen3, Gen switch may happen just in this time, but |
|---|
| 811 | + * we keep on accessing devices in unstable link status. Given |
|---|
| 812 | + * that LTSSM max timeout is 24ms per period, we can wait a bit |
|---|
| 813 | + * more for Gen switch. |
|---|
| 814 | + */ |
|---|
| 815 | + msleep(50); |
|---|
| 816 | + /* In case link drop after linkup, double check it */ |
|---|
| 817 | + if (dw_pcie_link_up(pci)) { |
|---|
| 818 | + dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n", |
|---|
| 819 | + rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS)); |
|---|
| 820 | + rk_pcie_debug_dump(rk_pcie); |
|---|
| 821 | + return 0; |
|---|
| 822 | + } |
|---|
| 823 | + } |
|---|
| 824 | + |
|---|
| 825 | + dev_info_ratelimited(pci->dev, "PCIe Linking... LTSSM is 0x%x\n", |
|---|
| 826 | + rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS)); |
|---|
| 827 | + rk_pcie_debug_dump(rk_pcie); |
|---|
| 828 | + msleep(20); |
|---|
| 829 | + } |
|---|
| 830 | + |
|---|
| 831 | + /* |
|---|
| 832 | + * In response to the situation where PCIe peripherals cannot be |
|---|
| 833 | + * enumerated due to signal abnormalities, reset PERST# and reset |
|---|
| 834 | + * the peripheral power supply, then restart the enumeration. |
|---|
| 835 | + */ |
|---|
| 836 | + ltssm = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS); |
|---|
| 837 | + dev_err(pci->dev, "PCIe Link Fail, LTSSM is 0x%x, hw_retries=%d\n", ltssm, hw_retries); |
|---|
| 838 | + if (ltssm >= 3 && !rk_pcie->is_signal_test) { |
|---|
| 839 | + rk_pcie_disable_power(rk_pcie); |
|---|
| 840 | + msleep(1000); |
|---|
| 841 | + rk_pcie_enable_power(rk_pcie); |
|---|
| 842 | + } else { |
|---|
| 843 | + break; |
|---|
| 844 | + } |
|---|
| 845 | + } |
|---|
| 507 | 846 | |
|---|
| 508 | 847 | return rk_pcie->is_signal_test == true ? 0 : -EINVAL; |
|---|
| 509 | 848 | } |
|---|
| 510 | 849 | |
|---|
| 850 | +static bool rk_pcie_udma_enabled(struct rk_pcie *rk_pcie) |
|---|
| 851 | +{ |
|---|
| 852 | + return dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + |
|---|
| 853 | + PCIE_DMA_CTRL_OFF); |
|---|
| 854 | +} |
|---|
| 855 | + |
|---|
| 511 | 856 | static int rk_pcie_init_dma_trx(struct rk_pcie *rk_pcie) |
|---|
| 512 | 857 | { |
|---|
| 858 | + if (!rk_pcie_udma_enabled(rk_pcie)) |
|---|
| 859 | + return 0; |
|---|
| 860 | + |
|---|
| 513 | 861 | rk_pcie->dma_obj = rk_pcie_dma_obj_probe(rk_pcie->pci->dev); |
|---|
| 514 | 862 | if (IS_ERR(rk_pcie->dma_obj)) { |
|---|
| 515 | 863 | dev_err(rk_pcie->pci->dev, "failed to prepare dma object\n"); |
|---|
| 516 | 864 | return -EINVAL; |
|---|
| 865 | + } else if (rk_pcie->dma_obj) { |
|---|
| 866 | + goto out; |
|---|
| 517 | 867 | } |
|---|
| 518 | 868 | |
|---|
| 519 | | - rk_pcie->dma_obj = pcie_dw_dmatest_register(rk_pcie->pci, true); |
|---|
| 869 | + rk_pcie->dma_obj = pcie_dw_dmatest_register(rk_pcie->pci->dev, true); |
|---|
| 520 | 870 | if (IS_ERR(rk_pcie->dma_obj)) { |
|---|
| 521 | 871 | dev_err(rk_pcie->pci->dev, "failed to prepare dmatest\n"); |
|---|
| 522 | 872 | return -EINVAL; |
|---|
| 523 | 873 | } |
|---|
| 524 | | - |
|---|
| 874 | +out: |
|---|
| 525 | 875 | /* Enable client write and read interrupt */ |
|---|
| 526 | 876 | rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000); |
|---|
| 527 | 877 | |
|---|
| .. | .. |
|---|
| 534 | 884 | return 0; |
|---|
| 535 | 885 | } |
|---|
| 536 | 886 | |
|---|
| 887 | +static int rk_pci_find_resbar_capability(struct rk_pcie *rk_pcie) |
|---|
| 888 | +{ |
|---|
| 889 | + u32 header; |
|---|
| 890 | + int ttl; |
|---|
| 891 | + int start = 0; |
|---|
| 892 | + int pos = PCI_CFG_SPACE_SIZE; |
|---|
| 893 | + int cap = PCI_EXT_CAP_ID_REBAR; |
|---|
| 894 | + |
|---|
| 895 | + /* minimum 8 bytes per capability */ |
|---|
| 896 | + ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; |
|---|
| 897 | + |
|---|
| 898 | + header = dw_pcie_readl_dbi(rk_pcie->pci, pos); |
|---|
| 899 | + |
|---|
| 900 | + /* |
|---|
| 901 | + * If we have no capabilities, this is indicated by cap ID, |
|---|
| 902 | + * cap version and next pointer all being 0. |
|---|
| 903 | + */ |
|---|
| 904 | + if (header == 0) |
|---|
| 905 | + return 0; |
|---|
| 906 | + |
|---|
| 907 | + while (ttl-- > 0) { |
|---|
| 908 | + if (PCI_EXT_CAP_ID(header) == cap && pos != start) |
|---|
| 909 | + return pos; |
|---|
| 910 | + |
|---|
| 911 | + pos = PCI_EXT_CAP_NEXT(header); |
|---|
| 912 | + if (pos < PCI_CFG_SPACE_SIZE) |
|---|
| 913 | + break; |
|---|
| 914 | + |
|---|
| 915 | + header = dw_pcie_readl_dbi(rk_pcie->pci, pos); |
|---|
| 916 | + if (!header) |
|---|
| 917 | + break; |
|---|
| 918 | + } |
|---|
| 919 | + |
|---|
| 920 | + return 0; |
|---|
| 921 | +} |
|---|
| 922 | + |
|---|
#ifdef MODULE
/*
 * Write @val (of @size bytes) at offset @reg in the shadow (DBI2)
 * register space, e.g. to program BAR mask/shadow registers.
 *
 * NOTE(review): this duplicates the DWC core's dw_pcie_write_dbi2() —
 * presumably because that symbol is not exported for module builds in
 * this kernel; confirm against the core before removing.
 */
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	/* Let the platform glue override the DBI2 access if it provides one. */
	if (pci->ops && pci->ops->write_dbi2) {
		pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
		return;
	}

	/* Fall back to a plain MMIO write into the DBI2 window. */
	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
	if (ret)
		dev_err(pci->dev, "write DBI address failed\n");
}
#endif
|---|
| 938 | + |
|---|
| 939 | +static int rk_pcie_ep_set_bar_flag(struct rk_pcie *rk_pcie, enum pci_barno barno, int flags) |
|---|
| 940 | +{ |
|---|
| 941 | + enum pci_barno bar = barno; |
|---|
| 942 | + u32 reg; |
|---|
| 943 | + |
|---|
| 944 | + reg = PCI_BASE_ADDRESS_0 + (4 * bar); |
|---|
| 945 | + |
|---|
| 946 | + /* Disabled the upper 32bits BAR to make a 64bits bar pair */ |
|---|
| 947 | + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) |
|---|
| 948 | + dw_pcie_writel_dbi2(rk_pcie->pci, reg + 4, 0); |
|---|
| 949 | + |
|---|
| 950 | + dw_pcie_writel_dbi(rk_pcie->pci, reg, flags); |
|---|
| 951 | + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) |
|---|
| 952 | + dw_pcie_writel_dbi(rk_pcie->pci, reg + 4, 0); |
|---|
| 953 | + |
|---|
| 954 | + return 0; |
|---|
| 955 | +} |
|---|
| 956 | + |
|---|
| 537 | 957 | static void rk_pcie_ep_setup(struct rk_pcie *rk_pcie) |
|---|
| 538 | 958 | { |
|---|
| 539 | 959 | int ret; |
|---|
| .. | .. |
|---|
| 541 | 961 | u32 lanes; |
|---|
| 542 | 962 | struct device *dev = rk_pcie->pci->dev; |
|---|
| 543 | 963 | struct device_node *np = dev->of_node; |
|---|
| 964 | + int resbar_base; |
|---|
| 965 | + int bar; |
|---|
| 544 | 966 | |
|---|
| 545 | 967 | /* Enable client write and read interrupt */ |
|---|
| 546 | 968 | rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000); |
|---|
| .. | .. |
|---|
| 604 | 1026 | /* Enable bus master and memory space */ |
|---|
| 605 | 1027 | dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_STATUS_COMMAND_REG, 0x6); |
|---|
| 606 | 1028 | |
|---|
| 607 | | - /* Resize BAR0 to 4GB */ |
|---|
| 608 | | - /* bit13-8 set to 6 means 64MB */ |
|---|
| 609 | | - dw_pcie_writel_dbi(rk_pcie->pci, PCIE_RESBAR_CTRL_REG0_REG, 0x600); |
|---|
| 1029 | + resbar_base = rk_pci_find_resbar_capability(rk_pcie); |
|---|
| 1030 | + if (!resbar_base) { |
|---|
| 1031 | + dev_warn(dev, "failed to find resbar_base\n"); |
|---|
| 1032 | + } else { |
|---|
| 1033 | + /* Resize BAR0 to support 512GB, BAR1 to support 8M, BAR2~5 to support 64M */ |
|---|
| 1034 | + dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4, 0xfffff0); |
|---|
| 1035 | + dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8, 0x13c0); |
|---|
| 1036 | + dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0xc, 0xfffff0); |
|---|
| 1037 | + dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x10, 0x3c0); |
|---|
| 1038 | + for (bar = 2; bar < 6; bar++) { |
|---|
| 1039 | + dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0); |
|---|
| 1040 | + dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8 + bar * 0x8, 0x6c0); |
|---|
| 1041 | + } |
|---|
| 610 | 1042 | |
|---|
| 611 | | - /* Set shadow BAR0 according 64MB */ |
|---|
| 612 | | - val = rk_pcie->mem_size - 1; |
|---|
| 613 | | - dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val); |
|---|
| 1043 | + /* Set flags */ |
|---|
| 1044 | + rk_pcie_ep_set_bar_flag(rk_pcie, BAR_0, PCI_BASE_ADDRESS_MEM_TYPE_32); |
|---|
| 1045 | + rk_pcie_ep_set_bar_flag(rk_pcie, BAR_1, PCI_BASE_ADDRESS_MEM_TYPE_32); |
|---|
| 1046 | + rk_pcie_ep_set_bar_flag(rk_pcie, BAR_2, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64); |
|---|
| 1047 | + rk_pcie_ep_set_bar_flag(rk_pcie, BAR_4, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64); |
|---|
| 1048 | + } |
|---|
| 614 | 1049 | |
|---|
| 615 | | - /* Set reserved memory address to BAR0 */ |
|---|
| 616 | | - dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_BAR0_REG, |
|---|
| 617 | | - rk_pcie->mem_start); |
|---|
| 1050 | + /* Device id and class id needed for request bar address */ |
|---|
| 1051 | + dw_pcie_writew_dbi(rk_pcie->pci, PCI_DEVICE_ID, 0x356a); |
|---|
| 1052 | + dw_pcie_writew_dbi(rk_pcie->pci, PCI_CLASS_DEVICE, 0x0580); |
|---|
| 1053 | + |
|---|
| 1054 | + /* Set shadow BAR0 */ |
|---|
| 1055 | + if (rk_pcie->is_rk1808) { |
|---|
| 1056 | + val = rk_pcie->mem_size - 1; |
|---|
| 1057 | + dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val); |
|---|
| 1058 | + } |
|---|
| 618 | 1059 | } |
|---|
| 619 | 1060 | |
|---|
| 620 | 1061 | static int rk_pcie_ep_win_parse(struct rk_pcie *rk_pcie) |
|---|
| .. | .. |
|---|
| 675 | 1116 | return 0; |
|---|
| 676 | 1117 | } |
|---|
| 677 | 1118 | |
|---|
| 1119 | +static void rk_pcie_msi_set_num_vectors(struct pcie_port *pp) |
|---|
| 1120 | +{ |
|---|
| 1121 | + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
|---|
| 1122 | + struct rk_pcie *rk_pcie = to_rk_pcie(pci); |
|---|
| 1123 | + |
|---|
| 1124 | + pp->num_vectors = rk_pcie->msi_vector_num; |
|---|
| 1125 | +} |
|---|
| 1126 | + |
|---|
| 678 | 1127 | static int rk_pcie_host_init(struct pcie_port *pp) |
|---|
| 679 | 1128 | { |
|---|
| 680 | 1129 | int ret; |
|---|
| 681 | 1130 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
|---|
| 682 | 1131 | |
|---|
| 683 | 1132 | dw_pcie_setup_rc(pp); |
|---|
| 1133 | + |
|---|
| 1134 | + /* Disable BAR0 BAR1 */ |
|---|
| 1135 | + dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, 0x0); |
|---|
| 1136 | + dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_1, 0x0); |
|---|
| 684 | 1137 | |
|---|
| 685 | 1138 | ret = rk_pcie_establish_link(pci); |
|---|
| 686 | 1139 | |
|---|
| .. | .. |
|---|
| 707 | 1160 | if (pp->msi_irq < 0) { |
|---|
| 708 | 1161 | dev_info(dev, "use outband MSI support"); |
|---|
| 709 | 1162 | rk_pcie_host_ops.msi_host_init = rk_pcie_msi_host_init; |
|---|
| 1163 | + } else { |
|---|
| 1164 | + dev_info(dev, "max MSI vector is %d\n", rk_pcie->msi_vector_num); |
|---|
| 1165 | + rk_pcie_host_ops.set_num_vectors = rk_pcie_msi_set_num_vectors; |
|---|
| 710 | 1166 | } |
|---|
| 711 | 1167 | } |
|---|
| 712 | 1168 | |
|---|
| 713 | 1169 | pp->ops = &rk_pcie_host_ops; |
|---|
| 714 | | - |
|---|
| 715 | | - if (device_property_read_bool(dev, "msi-map")) |
|---|
| 716 | | - pp->msi_ext = 1; |
|---|
| 717 | 1170 | |
|---|
| 718 | 1171 | ret = dw_pcie_host_init(pp); |
|---|
| 719 | 1172 | if (ret) { |
|---|
| .. | .. |
|---|
| 753 | 1206 | return ret; |
|---|
| 754 | 1207 | } |
|---|
| 755 | 1208 | |
|---|
| 1209 | + rk_pcie->pci->atu_base = rk_pcie->pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; |
|---|
| 756 | 1210 | rk_pcie->pci->iatu_unroll_enabled = rk_pcie_iatu_unroll_enabled(rk_pcie->pci); |
|---|
| 757 | 1211 | |
|---|
| 758 | 1212 | ret = rk_pcie_ep_atu_init(rk_pcie); |
|---|
| .. | .. |
|---|
| 769 | 1223 | return ret; |
|---|
| 770 | 1224 | } |
|---|
| 771 | 1225 | |
|---|
| 772 | | - return 0; |
|---|
| 773 | | -} |
|---|
| 1226 | + if (!rk_pcie_udma_enabled(rk_pcie)) |
|---|
| 1227 | + return 0; |
|---|
| 774 | 1228 | |
|---|
| 775 | | -static void rk_pcie_clk_deinit(struct rk_pcie *rk_pcie) |
|---|
| 776 | | -{ |
|---|
| 777 | | - clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks); |
|---|
| 778 | | - clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks); |
|---|
| 1229 | + return 0; |
|---|
| 779 | 1230 | } |
|---|
| 780 | 1231 | |
|---|
| 781 | 1232 | static int rk_pcie_clk_init(struct rk_pcie *rk_pcie) |
|---|
| 782 | 1233 | { |
|---|
| 783 | 1234 | struct device *dev = rk_pcie->pci->dev; |
|---|
| 784 | | - struct property *prop; |
|---|
| 785 | | - const char *name; |
|---|
| 786 | | - int i = 0, ret, count; |
|---|
| 1235 | + int ret; |
|---|
| 787 | 1236 | |
|---|
| 788 | | - count = of_property_count_strings(dev->of_node, "clock-names"); |
|---|
| 789 | | - if (count < 1) |
|---|
| 1237 | + rk_pcie->clk_cnt = devm_clk_bulk_get_all(dev, &rk_pcie->clks); |
|---|
| 1238 | + if (rk_pcie->clk_cnt < 1) |
|---|
| 790 | 1239 | return -ENODEV; |
|---|
| 791 | 1240 | |
|---|
| 792 | | - rk_pcie->clks = devm_kcalloc(dev, count, |
|---|
| 793 | | - sizeof(struct clk_bulk_data), |
|---|
| 794 | | - GFP_KERNEL); |
|---|
| 795 | | - if (!rk_pcie->clks) |
|---|
| 796 | | - return -ENOMEM; |
|---|
| 797 | | - |
|---|
| 798 | | - rk_pcie->clk_cnt = count; |
|---|
| 799 | | - |
|---|
| 800 | | - of_property_for_each_string(dev->of_node, "clock-names", prop, name) { |
|---|
| 801 | | - rk_pcie->clks[i].id = name; |
|---|
| 802 | | - if (!rk_pcie->clks[i].id) |
|---|
| 803 | | - return -ENOMEM; |
|---|
| 804 | | - i++; |
|---|
| 805 | | - } |
|---|
| 806 | | - |
|---|
| 807 | | - ret = devm_clk_bulk_get(dev, count, rk_pcie->clks); |
|---|
| 808 | | - if (ret) |
|---|
| 809 | | - return ret; |
|---|
| 810 | | - |
|---|
| 811 | | - ret = clk_bulk_prepare(count, rk_pcie->clks); |
|---|
| 812 | | - if (ret) |
|---|
| 813 | | - return ret; |
|---|
| 814 | | - |
|---|
| 815 | | - ret = clk_bulk_enable(count, rk_pcie->clks); |
|---|
| 1241 | + ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks); |
|---|
| 816 | 1242 | if (ret) { |
|---|
| 817 | | - clk_bulk_unprepare(count, rk_pcie->clks); |
|---|
| 1243 | + dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret); |
|---|
| 818 | 1244 | return ret; |
|---|
| 819 | 1245 | } |
|---|
| 820 | 1246 | |
|---|
| .. | .. |
|---|
| 839 | 1265 | return PTR_ERR(rk_pcie->dbi_base); |
|---|
| 840 | 1266 | |
|---|
| 841 | 1267 | rk_pcie->pci->dbi_base = rk_pcie->dbi_base; |
|---|
| 1268 | + rk_pcie->pci->dbi_base2 = rk_pcie->pci->dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET; |
|---|
| 842 | 1269 | |
|---|
| 843 | 1270 | apb_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
|---|
| 844 | 1271 | "pcie-apb"); |
|---|
| .. | .. |
|---|
| 865 | 1292 | return PTR_ERR(rk_pcie->rst_gpio); |
|---|
| 866 | 1293 | } |
|---|
| 867 | 1294 | |
|---|
| 1295 | + if (device_property_read_u32(&pdev->dev, "rockchip,perst-inactive-ms", |
|---|
| 1296 | + &rk_pcie->perst_inactive_ms)) |
|---|
| 1297 | + rk_pcie->perst_inactive_ms = 200; |
|---|
| 1298 | + |
|---|
| 1299 | + rk_pcie->prsnt_gpio = devm_gpiod_get_optional(&pdev->dev, "prsnt", GPIOD_IN); |
|---|
| 1300 | + if (IS_ERR_OR_NULL(rk_pcie->prsnt_gpio)) |
|---|
| 1301 | + dev_info(&pdev->dev, "invalid prsnt-gpios property in node\n"); |
|---|
| 1302 | + |
|---|
| 868 | 1303 | return 0; |
|---|
| 869 | 1304 | } |
|---|
| 870 | 1305 | |
|---|
| .. | .. |
|---|
| 873 | 1308 | int ret; |
|---|
| 874 | 1309 | struct device *dev = rk_pcie->pci->dev; |
|---|
| 875 | 1310 | |
|---|
| 876 | | - rk_pcie->phy = devm_phy_get(dev, "pcie-phy"); |
|---|
| 1311 | + rk_pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); |
|---|
| 877 | 1312 | if (IS_ERR(rk_pcie->phy)) { |
|---|
| 878 | 1313 | if (PTR_ERR(rk_pcie->phy) != -EPROBE_DEFER) |
|---|
| 879 | 1314 | dev_info(dev, "missing phy\n"); |
|---|
| .. | .. |
|---|
| 882 | 1317 | |
|---|
| 883 | 1318 | switch (rk_pcie->mode) { |
|---|
| 884 | 1319 | case RK_PCIE_RC_TYPE: |
|---|
| 885 | | - rk_pcie->phy_mode = PHY_MODE_PCIE_RC; |
|---|
| 1320 | + rk_pcie->phy_mode = PHY_MODE_PCIE; /* make no sense */ |
|---|
| 1321 | + rk_pcie->phy_sub_mode = PHY_MODE_PCIE_RC; |
|---|
| 886 | 1322 | break; |
|---|
| 887 | 1323 | case RK_PCIE_EP_TYPE: |
|---|
| 888 | | - rk_pcie->phy_mode = PHY_MODE_PCIE_EP; |
|---|
| 1324 | + rk_pcie->phy_mode = PHY_MODE_PCIE; |
|---|
| 1325 | + rk_pcie->phy_sub_mode = PHY_MODE_PCIE_EP; |
|---|
| 889 | 1326 | break; |
|---|
| 890 | 1327 | } |
|---|
| 891 | 1328 | |
|---|
| 892 | | - ret = phy_set_mode(rk_pcie->phy, rk_pcie->phy_mode); |
|---|
| 1329 | + ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode, |
|---|
| 1330 | + rk_pcie->phy_sub_mode); |
|---|
| 893 | 1331 | if (ret) { |
|---|
| 894 | 1332 | dev_err(dev, "fail to set phy to mode %s, err %d\n", |
|---|
| 895 | | - (rk_pcie->phy_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP", |
|---|
| 1333 | + (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP", |
|---|
| 896 | 1334 | ret); |
|---|
| 897 | 1335 | return ret; |
|---|
| 898 | 1336 | } |
|---|
| 899 | 1337 | |
|---|
| 900 | 1338 | if (rk_pcie->bifurcation) |
|---|
| 901 | | - ret = phy_set_mode(rk_pcie->phy, PHY_MODE_PCIE_BIFURCATION); |
|---|
| 1339 | + phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode, |
|---|
| 1340 | + PHY_MODE_PCIE_BIFURCATION); |
|---|
| 902 | 1341 | |
|---|
| 903 | 1342 | ret = phy_init(rk_pcie->phy); |
|---|
| 904 | 1343 | if (ret < 0) { |
|---|
| .. | .. |
|---|
| 907 | 1346 | } |
|---|
| 908 | 1347 | |
|---|
| 909 | 1348 | phy_power_on(rk_pcie->phy); |
|---|
| 910 | | - |
|---|
| 911 | | - return 0; |
|---|
| 912 | | -} |
|---|
| 913 | | - |
|---|
| 914 | | -static int rk_pcie_reset_control_release(struct rk_pcie *rk_pcie) |
|---|
| 915 | | -{ |
|---|
| 916 | | - struct device *dev = rk_pcie->pci->dev; |
|---|
| 917 | | - struct property *prop; |
|---|
| 918 | | - const char *name; |
|---|
| 919 | | - int ret, count, i = 0; |
|---|
| 920 | | - |
|---|
| 921 | | - count = of_property_count_strings(dev->of_node, "reset-names"); |
|---|
| 922 | | - if (count < 1) |
|---|
| 923 | | - return -ENODEV; |
|---|
| 924 | | - |
|---|
| 925 | | - rk_pcie->rsts = devm_kcalloc(dev, count, |
|---|
| 926 | | - sizeof(struct reset_bulk_data), |
|---|
| 927 | | - GFP_KERNEL); |
|---|
| 928 | | - if (!rk_pcie->rsts) |
|---|
| 929 | | - return -ENOMEM; |
|---|
| 930 | | - |
|---|
| 931 | | - of_property_for_each_string(dev->of_node, "reset-names", |
|---|
| 932 | | - prop, name) { |
|---|
| 933 | | - rk_pcie->rsts[i].id = name; |
|---|
| 934 | | - if (!rk_pcie->rsts[i].id) |
|---|
| 935 | | - return -ENOMEM; |
|---|
| 936 | | - i++; |
|---|
| 937 | | - } |
|---|
| 938 | | - |
|---|
| 939 | | - for (i = 0; i < count; i++) { |
|---|
| 940 | | - rk_pcie->rsts[i].rst = devm_reset_control_get_exclusive(dev, |
|---|
| 941 | | - rk_pcie->rsts[i].id); |
|---|
| 942 | | - if (IS_ERR_OR_NULL(rk_pcie->rsts[i].rst)) { |
|---|
| 943 | | - dev_err(dev, "failed to get %s\n", |
|---|
| 944 | | - rk_pcie->clks[i].id); |
|---|
| 945 | | - return -PTR_ERR(rk_pcie->rsts[i].rst); |
|---|
| 946 | | - } |
|---|
| 947 | | - } |
|---|
| 948 | | - |
|---|
| 949 | | - for (i = 0; i < count; i++) { |
|---|
| 950 | | - ret = reset_control_deassert(rk_pcie->rsts[i].rst); |
|---|
| 951 | | - if (ret) { |
|---|
| 952 | | - dev_err(dev, "failed to release %s\n", |
|---|
| 953 | | - rk_pcie->rsts[i].id); |
|---|
| 954 | | - return ret; |
|---|
| 955 | | - } |
|---|
| 956 | | - } |
|---|
| 957 | 1349 | |
|---|
| 958 | 1350 | return 0; |
|---|
| 959 | 1351 | } |
|---|
| .. | .. |
|---|
| 1058 | 1450 | table->start.chnl = table->chn; |
|---|
| 1059 | 1451 | } |
|---|
| 1060 | 1452 | |
|---|
/*
 * Deferred handler for the hot-reset event (queued from the system IRQ
 * handler when PCIE_CLIENT_INTR_STATUS_MISC bit 2 fires).
 *
 * Restores the command-register enables that a hot reset clears, and —
 * when the LTSSM "app delay 2" handshake is enabled — waits for the
 * LTSSM to settle before acking the delay so link training can proceed.
 */
static void rk_pcie_hot_rst_work(struct work_struct *work)
{
	struct rk_pcie *rk_pcie = container_of(work, struct rk_pcie, hot_rst_work);
	u32 val, status;
	int ret;

	/* Setup command register: keep upper bits, re-enable IO/MEM decode,
	 * bus mastering and SERR reporting.
	 */
	val = dw_pcie_readl_dbi(rk_pcie->pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(rk_pcie->pci, PCI_COMMAND, val);

	if (rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL) & PCIE_LTSSM_APP_DLY2_EN) {
		/* Poll until the LTSSM state (low 6 bits) returns to 0. */
		ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_LTSSM_STATUS,
					 status, ((status & 0x3F) == 0), 100, RK_PCIE_HOTRESET_TMOUT_US);
		if (ret)
			dev_err(rk_pcie->pci->dev, "wait for detect quiet failed!\n");

		/* Ack DLY2; the upper 16 bits are the write-enable mask. */
		rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL,
				   (PCIE_LTSSM_APP_DLY2_DONE) | ((PCIE_LTSSM_APP_DLY2_DONE) << 16));
	}
}
|---|
| 1476 | + |
|---|
| 1061 | 1477 | static irqreturn_t rk_pcie_sys_irq_handler(int irq, void *arg) |
|---|
| 1062 | 1478 | { |
|---|
| 1063 | 1479 | struct rk_pcie *rk_pcie = arg; |
|---|
| 1064 | 1480 | u32 chn; |
|---|
| 1065 | 1481 | union int_status status; |
|---|
| 1066 | 1482 | union int_clear clears; |
|---|
| 1067 | | - u32 reg, val; |
|---|
| 1483 | + u32 reg; |
|---|
| 1068 | 1484 | |
|---|
| 1069 | 1485 | status.asdword = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + |
|---|
| 1070 | 1486 | PCIE_DMA_WR_INT_STATUS); |
|---|
| .. | .. |
|---|
| 1105 | 1521 | } |
|---|
| 1106 | 1522 | |
|---|
| 1107 | 1523 | reg = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC); |
|---|
| 1108 | | - if (reg & BIT(2)) { |
|---|
| 1109 | | - /* Setup command register */ |
|---|
| 1110 | | - val = dw_pcie_readl_dbi(rk_pcie->pci, PCI_COMMAND); |
|---|
| 1111 | | - val &= 0xffff0000; |
|---|
| 1112 | | - val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | |
|---|
| 1113 | | - PCI_COMMAND_MASTER | PCI_COMMAND_SERR; |
|---|
| 1114 | | - dw_pcie_writel_dbi(rk_pcie->pci, PCI_COMMAND, val); |
|---|
| 1115 | | - } |
|---|
| 1524 | + if (reg & BIT(2)) |
|---|
| 1525 | + queue_work(rk_pcie->hot_rst_wq, &rk_pcie->hot_rst_work); |
|---|
| 1116 | 1526 | |
|---|
| 1117 | 1527 | rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC, reg); |
|---|
| 1118 | 1528 | |
|---|
| .. | .. |
|---|
| 1149 | 1559 | .mode = RK_PCIE_EP_TYPE, |
|---|
| 1150 | 1560 | }; |
|---|
| 1151 | 1561 | |
|---|
/* RC-mode match data (rk3528/rk3562): controller exposes 8 MSI vectors. */
static const struct rk_pcie_of_data rk3528_pcie_rc_of_data = {
	.mode = RK_PCIE_RC_TYPE,
	.msi_vector_num = 8,
};
|---|
| 1566 | + |
|---|
| 1152 | 1567 | static const struct of_device_id rk_pcie_of_match[] = { |
|---|
| 1153 | 1568 | { |
|---|
| 1154 | 1569 | .compatible = "rockchip,rk1808-pcie", |
|---|
| .. | .. |
|---|
| 1159 | 1574 | .data = &rk_pcie_ep_of_data, |
|---|
| 1160 | 1575 | }, |
|---|
| 1161 | 1576 | { |
|---|
| 1577 | + .compatible = "rockchip,rk3528-pcie", |
|---|
| 1578 | + .data = &rk3528_pcie_rc_of_data, |
|---|
| 1579 | + }, |
|---|
| 1580 | + { |
|---|
| 1581 | + .compatible = "rockchip,rk3562-pcie", |
|---|
| 1582 | + .data = &rk3528_pcie_rc_of_data, |
|---|
| 1583 | + }, |
|---|
| 1584 | + { |
|---|
| 1162 | 1585 | .compatible = "rockchip,rk3568-pcie", |
|---|
| 1163 | 1586 | .data = &rk_pcie_rc_of_data, |
|---|
| 1164 | 1587 | }, |
|---|
| 1165 | 1588 | { |
|---|
| 1166 | 1589 | .compatible = "rockchip,rk3568-pcie-ep", |
|---|
| 1590 | + .data = &rk_pcie_ep_of_data, |
|---|
| 1591 | + }, |
|---|
| 1592 | + { |
|---|
| 1593 | + .compatible = "rockchip,rk3588-pcie", |
|---|
| 1594 | + .data = &rk_pcie_rc_of_data, |
|---|
| 1595 | + }, |
|---|
| 1596 | + { |
|---|
| 1597 | + .compatible = "rockchip,rk3588-pcie-ep", |
|---|
| 1167 | 1598 | .data = &rk_pcie_ep_of_data, |
|---|
| 1168 | 1599 | }, |
|---|
| 1169 | 1600 | {}, |
|---|
| .. | .. |
|---|
| 1173 | 1604 | |
|---|
/* DWC core callbacks: only link bring-up is provided here. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = rk_pcie_establish_link,
};
|---|
| 1178 | 1608 | |
|---|
| 1179 | 1609 | static int rk1808_pcie_fixup(struct rk_pcie *rk_pcie, struct device_node *np) |
|---|
| .. | .. |
|---|
| 1215 | 1645 | |
|---|
| 1216 | 1646 | /* LTSSM EN ctrl mode */ |
|---|
| 1217 | 1647 | val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL); |
|---|
| 1218 | | - val |= PCIE_LTSSM_ENABLE_ENHANCE | (PCIE_LTSSM_ENABLE_ENHANCE << 16); |
|---|
| 1648 | + val |= (PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN) |
|---|
| 1649 | + | ((PCIE_LTSSM_APP_DLY2_EN | PCIE_LTSSM_ENABLE_ENHANCE) << 16); |
|---|
| 1219 | 1650 | rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL, val); |
|---|
| 1220 | 1651 | } |
|---|
| 1221 | 1652 | |
|---|
| .. | .. |
|---|
| 1253 | 1684 | static int rk_pcie_intx_map(struct irq_domain *domain, unsigned int irq, |
|---|
| 1254 | 1685 | irq_hw_number_t hwirq) |
|---|
| 1255 | 1686 | { |
|---|
| 1256 | | - irq_set_chip_and_handler(irq, &rk_pcie_legacy_irq_chip, handle_simple_irq); |
|---|
| 1687 | + irq_set_chip_and_handler(irq, &rk_pcie_legacy_irq_chip, handle_level_irq); |
|---|
| 1257 | 1688 | irq_set_chip_data(irq, domain->host_data); |
|---|
| 1258 | 1689 | |
|---|
| 1259 | 1690 | return 0; |
|---|
| .. | .. |
|---|
| 1342 | 1773 | return ret; |
|---|
| 1343 | 1774 | } |
|---|
| 1344 | 1775 | |
|---|
| 1345 | | -static int rk_pci_find_capability(struct rk_pcie *rk_pcie, int cap) |
|---|
| 1346 | | -{ |
|---|
| 1347 | | - u32 header; |
|---|
| 1348 | | - int ttl; |
|---|
| 1349 | | - int start = 0; |
|---|
| 1350 | | - int pos = PCI_CFG_SPACE_SIZE; |
|---|
| 1351 | | - |
|---|
| 1352 | | - /* minimum 8 bytes per capability */ |
|---|
| 1353 | | - ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; |
|---|
| 1354 | | - |
|---|
| 1355 | | - header = dw_pcie_readl_dbi(rk_pcie->pci, pos); |
|---|
| 1356 | | - |
|---|
| 1357 | | - /* |
|---|
| 1358 | | - * If we have no capabilities, this is indicated by cap ID, |
|---|
| 1359 | | - * cap version and next pointer all being 0. |
|---|
| 1360 | | - */ |
|---|
| 1361 | | - if (header == 0) |
|---|
| 1362 | | - return 0; |
|---|
| 1363 | | - |
|---|
| 1364 | | - while (ttl-- > 0) { |
|---|
| 1365 | | - if (PCI_EXT_CAP_ID(header) == cap && pos != start) |
|---|
| 1366 | | - return pos; |
|---|
| 1367 | | - |
|---|
| 1368 | | - pos = PCI_EXT_CAP_NEXT(header); |
|---|
| 1369 | | - if (pos < PCI_CFG_SPACE_SIZE) |
|---|
| 1370 | | - break; |
|---|
| 1371 | | - |
|---|
| 1372 | | - header = dw_pcie_readl_dbi(rk_pcie->pci, pos); |
|---|
| 1373 | | - if (!header) |
|---|
| 1374 | | - break; |
|---|
| 1375 | | - } |
|---|
| 1376 | | - |
|---|
| 1377 | | - return 0; |
|---|
| 1378 | | -} |
|---|
| 1379 | | - |
|---|
| 1380 | 1776 | #define RAS_DES_EVENT(ss, v) \ |
|---|
| 1381 | 1777 | do { \ |
|---|
| 1382 | 1778 | dw_pcie_writel_dbi(pcie->pci, cap_base + 8, v); \ |
|---|
| .. | .. |
|---|
| 1387 | 1783 | { |
|---|
| 1388 | 1784 | struct rk_pcie *pcie = s->private; |
|---|
| 1389 | 1785 | int cap_base; |
|---|
| 1786 | + u32 val = rk_pcie_readl_apb(pcie, PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN); |
|---|
| 1787 | + char *pm; |
|---|
| 1390 | 1788 | |
|---|
| 1391 | | - cap_base = rk_pci_find_capability(pcie, PCI_EXT_CAP_ID_VNDR); |
|---|
| 1789 | + if (val & BIT(6)) |
|---|
| 1790 | + pm = "In training"; |
|---|
| 1791 | + else if (val & BIT(5)) |
|---|
| 1792 | + pm = "L1.2"; |
|---|
| 1793 | + else if (val & BIT(4)) |
|---|
| 1794 | + pm = "L1.1"; |
|---|
| 1795 | + else if (val & BIT(3)) |
|---|
| 1796 | + pm = "L1"; |
|---|
| 1797 | + else if (val & BIT(2)) |
|---|
| 1798 | + pm = "L0"; |
|---|
| 1799 | + else if (val & 0x3) |
|---|
| 1800 | + pm = (val == 0x3) ? "L0s" : (val & BIT(1) ? "RX L0s" : "TX L0s"); |
|---|
| 1801 | + else |
|---|
| 1802 | + pm = "Invalid"; |
|---|
| 1803 | + |
|---|
| 1804 | + seq_printf(s, "Common event signal status: 0x%s\n", pm); |
|---|
| 1805 | + |
|---|
| 1806 | + cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR); |
|---|
| 1392 | 1807 | if (!cap_base) { |
|---|
| 1393 | 1808 | dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n"); |
|---|
| 1394 | 1809 | return 0; |
|---|
| .. | .. |
|---|
| 1424 | 1839 | |
|---|
| 1425 | 1840 | return 0; |
|---|
| 1426 | 1841 | } |
|---|
| 1427 | | - |
|---|
| 1428 | 1842 | static int rockchip_pcie_rasdes_open(struct inode *inode, struct file *file) |
|---|
| 1429 | 1843 | { |
|---|
| 1430 | 1844 | return single_open(file, rockchip_pcie_rasdes_show, |
|---|
| .. | .. |
|---|
| 1443 | 1857 | if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) |
|---|
| 1444 | 1858 | return -EFAULT; |
|---|
| 1445 | 1859 | |
|---|
| 1446 | | - cap_base = rk_pci_find_capability(pcie, PCI_EXT_CAP_ID_VNDR); |
|---|
| 1860 | + cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR); |
|---|
| 1447 | 1861 | if (!cap_base) { |
|---|
| 1448 | 1862 | dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n"); |
|---|
| 1449 | 1863 | return 0; |
|---|
| .. | .. |
|---|
| 1528 | 1942 | const struct rk_pcie_of_data *data; |
|---|
| 1529 | 1943 | enum rk_pcie_device_mode mode; |
|---|
| 1530 | 1944 | struct device_node *np = pdev->dev.of_node; |
|---|
| 1531 | | - struct platform_driver *drv = to_platform_driver(dev->driver); |
|---|
| 1532 | | - u32 val; |
|---|
| 1945 | + u32 val = 0; |
|---|
| 1533 | 1946 | int irq; |
|---|
| 1534 | 1947 | |
|---|
| 1535 | 1948 | match = of_match_device(rk_pcie_of_match, dev); |
|---|
| 1536 | | - if (!match) |
|---|
| 1537 | | - return -EINVAL; |
|---|
| 1949 | + if (!match) { |
|---|
| 1950 | + ret = -EINVAL; |
|---|
| 1951 | + goto release_driver; |
|---|
| 1952 | + } |
|---|
| 1538 | 1953 | |
|---|
| 1539 | 1954 | data = (struct rk_pcie_of_data *)match->data; |
|---|
| 1540 | 1955 | mode = (enum rk_pcie_device_mode)data->mode; |
|---|
| 1541 | 1956 | |
|---|
| 1542 | 1957 | rk_pcie = devm_kzalloc(dev, sizeof(*rk_pcie), GFP_KERNEL); |
|---|
| 1543 | | - if (!rk_pcie) |
|---|
| 1544 | | - return -ENOMEM; |
|---|
| 1958 | + if (!rk_pcie) { |
|---|
| 1959 | + ret = -ENOMEM; |
|---|
| 1960 | + goto release_driver; |
|---|
| 1961 | + } |
|---|
| 1545 | 1962 | |
|---|
| 1546 | 1963 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); |
|---|
| 1547 | | - if (!pci) |
|---|
| 1548 | | - return -ENOMEM; |
|---|
| 1964 | + if (!pci) { |
|---|
| 1965 | + ret = -ENOMEM; |
|---|
| 1966 | + goto release_driver; |
|---|
| 1967 | + } |
|---|
| 1549 | 1968 | |
|---|
| 1550 | 1969 | pci->dev = dev; |
|---|
| 1551 | 1970 | pci->ops = &dw_pcie_ops; |
|---|
| 1552 | 1971 | |
|---|
| 1553 | 1972 | rk_pcie->mode = mode; |
|---|
| 1973 | + rk_pcie->msi_vector_num = data->msi_vector_num; |
|---|
| 1554 | 1974 | rk_pcie->pci = pci; |
|---|
| 1555 | 1975 | |
|---|
| 1556 | 1976 | if (of_device_is_compatible(np, "rockchip,rk1808-pcie") || |
|---|
| .. | .. |
|---|
| 1565 | 1985 | ret = rk_pcie_resource_get(pdev, rk_pcie); |
|---|
| 1566 | 1986 | if (ret) { |
|---|
| 1567 | 1987 | dev_err(dev, "resource init failed\n"); |
|---|
| 1568 | | - return ret; |
|---|
| 1988 | + goto release_driver; |
|---|
| 1569 | 1989 | } |
|---|
| 1570 | 1990 | |
|---|
| 1991 | + if (!IS_ERR_OR_NULL(rk_pcie->prsnt_gpio)) { |
|---|
| 1992 | + if (!gpiod_get_value(rk_pcie->prsnt_gpio)) { |
|---|
| 1993 | + dev_info(dev, "device isn't present\n"); |
|---|
| 1994 | + ret = -ENODEV; |
|---|
| 1995 | + goto release_driver; |
|---|
| 1996 | + } |
|---|
| 1997 | + } |
|---|
| 1998 | + |
|---|
| 1999 | + rk_pcie->supports_clkreq = device_property_read_bool(dev, "supports-clkreq"); |
|---|
| 2000 | + |
|---|
| 2001 | +retry_regulator: |
|---|
| 1571 | 2002 | /* DON'T MOVE ME: must be enable before phy init */ |
|---|
| 1572 | 2003 | rk_pcie->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); |
|---|
| 1573 | 2004 | if (IS_ERR(rk_pcie->vpcie3v3)) { |
|---|
| 1574 | | - if (PTR_ERR(rk_pcie->vpcie3v3) != -ENODEV) |
|---|
| 1575 | | - return PTR_ERR(rk_pcie->vpcie3v3); |
|---|
| 2005 | + if (PTR_ERR(rk_pcie->vpcie3v3) != -ENODEV) { |
|---|
| 2006 | + if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT)) { |
|---|
| 2007 | + /* Deferred but in threaded context for most 10s */ |
|---|
| 2008 | + msleep(20); |
|---|
| 2009 | + if (++val < 500) |
|---|
| 2010 | + goto retry_regulator; |
|---|
| 2011 | + } |
|---|
| 2012 | + |
|---|
| 2013 | + ret = PTR_ERR(rk_pcie->vpcie3v3); |
|---|
| 2014 | + goto release_driver; |
|---|
| 2015 | + } |
|---|
| 2016 | + |
|---|
| 1576 | 2017 | dev_info(dev, "no vpcie3v3 regulator found\n"); |
|---|
| 1577 | 2018 | } |
|---|
| 1578 | 2019 | |
|---|
| 1579 | 2020 | ret = rk_pcie_enable_power(rk_pcie); |
|---|
| 1580 | 2021 | if (ret) |
|---|
| 1581 | | - return ret; |
|---|
| 2022 | + goto release_driver; |
|---|
| 1582 | 2023 | |
|---|
| 1583 | 2024 | ret = rk_pcie_phy_init(rk_pcie); |
|---|
| 1584 | 2025 | if (ret) { |
|---|
| .. | .. |
|---|
| 1586 | 2027 | goto disable_vpcie3v3; |
|---|
| 1587 | 2028 | } |
|---|
| 1588 | 2029 | |
|---|
| 1589 | | - ret = rk_pcie_reset_control_release(rk_pcie); |
|---|
| 1590 | | - if (ret) { |
|---|
| 1591 | | - dev_err(dev, "reset control init failed\n"); |
|---|
| 1592 | | - goto disable_vpcie3v3; |
|---|
| 2030 | + rk_pcie->rsts = devm_reset_control_array_get_exclusive(dev); |
|---|
| 2031 | + if (IS_ERR(rk_pcie->rsts)) { |
|---|
| 2032 | + ret = PTR_ERR(rk_pcie->rsts); |
|---|
| 2033 | + dev_err(dev, "failed to get reset lines\n"); |
|---|
| 2034 | + goto disable_phy; |
|---|
| 1593 | 2035 | } |
|---|
| 2036 | + |
|---|
| 2037 | + reset_control_deassert(rk_pcie->rsts); |
|---|
| 1594 | 2038 | |
|---|
| 1595 | 2039 | ret = rk_pcie_request_sys_irq(rk_pcie, pdev); |
|---|
| 1596 | 2040 | if (ret) { |
|---|
| 1597 | 2041 | dev_err(dev, "pcie irq init failed\n"); |
|---|
| 1598 | | - goto disable_vpcie3v3; |
|---|
| 2042 | + goto disable_phy; |
|---|
| 1599 | 2043 | } |
|---|
| 1600 | 2044 | |
|---|
| 1601 | 2045 | platform_set_drvdata(pdev, rk_pcie); |
|---|
| .. | .. |
|---|
| 1603 | 2047 | ret = rk_pcie_clk_init(rk_pcie); |
|---|
| 1604 | 2048 | if (ret) { |
|---|
| 1605 | 2049 | dev_err(dev, "clock init failed\n"); |
|---|
| 1606 | | - goto disable_vpcie3v3; |
|---|
| 2050 | + goto disable_phy; |
|---|
| 1607 | 2051 | } |
|---|
| 1608 | 2052 | |
|---|
| 1609 | 2053 | dw_pcie_dbi_ro_wr_en(pci); |
|---|
| .. | .. |
|---|
| 1626 | 2070 | /* Unmask all legacy interrupt from INTA~INTD */ |
|---|
| 1627 | 2071 | rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY, |
|---|
| 1628 | 2072 | UNMASK_ALL_LEGACY_INT); |
|---|
| 2073 | + } else { |
|---|
| 2074 | + dev_info(dev, "missing legacy IRQ resource\n"); |
|---|
| 1629 | 2075 | } |
|---|
| 1630 | | - |
|---|
| 1631 | | - dev_info(dev, "missing legacy IRQ resource\n"); |
|---|
| 1632 | 2076 | } |
|---|
| 1633 | 2077 | |
|---|
| 1634 | 2078 | /* Set PCIe mode */ |
|---|
| .. | .. |
|---|
| 1642 | 2086 | rk_pcie->is_signal_test = true; |
|---|
| 1643 | 2087 | } |
|---|
| 1644 | 2088 | |
|---|
| 1645 | | - /* Force into compliance mode */ |
|---|
| 1646 | | - if (device_property_read_bool(dev, "rockchip,compliance-mode")) { |
|---|
| 1647 | | - val = dw_pcie_readl_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS); |
|---|
| 1648 | | - val |= BIT(4); |
|---|
| 1649 | | - dw_pcie_writel_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS, val); |
|---|
| 2089 | + /* |
|---|
| 2090 | + * Force into compliance mode |
|---|
| 2091 | + * comp_prst is a two dimensional array of which the first element |
|---|
| 2092 | + * stands for speed mode, and the second one is preset value encoding: |
|---|
| 2093 | + * [0] 0->SMA tool control the signal switch, 1/2/3 is for manual Gen setting |
|---|
| 2094 | + * [1] transmitter setting for manual Gen setting, valid only if [0] isn't zero. |
|---|
| 2095 | + */ |
|---|
| 2096 | + if (!device_property_read_u32_array(dev, "rockchip,compliance-mode", |
|---|
| 2097 | + rk_pcie->comp_prst, 2)) { |
|---|
| 2098 | + BUG_ON(rk_pcie->comp_prst[0] > 3 || rk_pcie->comp_prst[1] > 10); |
|---|
| 2099 | + if (!rk_pcie->comp_prst[0]) { |
|---|
| 2100 | + dev_info(dev, "Auto compliance mode for SMA tool.\n"); |
|---|
| 2101 | + } else { |
|---|
| 2102 | + dev_info(dev, "compliance mode for soldered board Gen%d, P%d.\n", |
|---|
| 2103 | + rk_pcie->comp_prst[0], rk_pcie->comp_prst[1]); |
|---|
| 2104 | + val = dw_pcie_readl_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS); |
|---|
| 2105 | + val |= BIT(4) | rk_pcie->comp_prst[0] | (rk_pcie->comp_prst[1] << 12); |
|---|
| 2106 | + dw_pcie_writel_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS, val); |
|---|
| 2107 | + } |
|---|
| 1650 | 2108 | rk_pcie->is_signal_test = true; |
|---|
| 1651 | 2109 | } |
|---|
| 2110 | + |
|---|
| 2111 | + /* Skip waiting for training to pass in system PM routine */ |
|---|
| 2112 | + if (device_property_read_bool(dev, "rockchip,skip-scan-in-resume")) |
|---|
| 2113 | + rk_pcie->skip_scan_in_resume = true; |
|---|
| 2114 | + |
|---|
| 2115 | + rk_pcie->hot_rst_wq = create_singlethread_workqueue("rk_pcie_hot_rst_wq"); |
|---|
| 2116 | + if (!rk_pcie->hot_rst_wq) { |
|---|
| 2117 | + dev_err(dev, "failed to create hot_rst workqueue\n"); |
|---|
| 2118 | + ret = -ENOMEM; |
|---|
| 2119 | + goto remove_irq_domain; |
|---|
| 2120 | + } |
|---|
| 2121 | + INIT_WORK(&rk_pcie->hot_rst_work, rk_pcie_hot_rst_work); |
|---|
| 1652 | 2122 | |
|---|
| 1653 | 2123 | switch (rk_pcie->mode) { |
|---|
| 1654 | 2124 | case RK_PCIE_RC_TYPE: |
|---|
| .. | .. |
|---|
| 1663 | 2133 | return 0; |
|---|
| 1664 | 2134 | |
|---|
| 1665 | 2135 | if (ret) |
|---|
| 1666 | | - goto remove_irq_domain; |
|---|
| 2136 | + goto remove_rst_wq; |
|---|
| 1667 | 2137 | |
|---|
| 1668 | 2138 | ret = rk_pcie_init_dma_trx(rk_pcie); |
|---|
| 1669 | 2139 | if (ret) { |
|---|
| 1670 | 2140 | dev_err(dev, "failed to add dma extension\n"); |
|---|
| 1671 | | - return ret; |
|---|
| 2141 | + goto remove_rst_wq; |
|---|
| 1672 | 2142 | } |
|---|
| 1673 | 2143 | |
|---|
| 1674 | 2144 | if (rk_pcie->dma_obj) { |
|---|
| .. | .. |
|---|
| 1680 | 2150 | /* hold link reset grant after link-up */ |
|---|
| 1681 | 2151 | ret = rk_pcie_reset_grant_ctrl(rk_pcie, false); |
|---|
| 1682 | 2152 | if (ret) |
|---|
| 1683 | | - goto remove_irq_domain; |
|---|
| 2153 | + goto remove_rst_wq; |
|---|
| 1684 | 2154 | } |
|---|
| 1685 | 2155 | |
|---|
| 1686 | 2156 | dw_pcie_dbi_ro_wr_dis(pci); |
|---|
| 1687 | 2157 | |
|---|
| 1688 | 2158 | device_init_wakeup(dev, true); |
|---|
| 1689 | | - drv->driver.pm = &rockchip_dw_pcie_pm_ops; |
|---|
| 2159 | + |
|---|
| 2160 | + /* Enable async system PM for multiports SoC */ |
|---|
| 2161 | + device_enable_async_suspend(dev); |
|---|
| 1690 | 2162 | |
|---|
| 1691 | 2163 | if (IS_ENABLED(CONFIG_DEBUG_FS)) { |
|---|
| 1692 | 2164 | ret = rockchip_pcie_debugfs_init(rk_pcie); |
|---|
| .. | .. |
|---|
| 1694 | 2166 | dev_err(dev, "failed to setup debugfs: %d\n", ret); |
|---|
| 1695 | 2167 | |
|---|
| 1696 | 2168 | /* Enable RASDES Error event by default */ |
|---|
| 1697 | | - val = rk_pci_find_capability(rk_pcie, PCI_EXT_CAP_ID_VNDR); |
|---|
| 2169 | + val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_VNDR); |
|---|
| 1698 | 2170 | if (!val) { |
|---|
| 1699 | 2171 | dev_err(dev, "Not able to find RASDES CAP!\n"); |
|---|
| 1700 | 2172 | return 0; |
|---|
| .. | .. |
|---|
| 1706 | 2178 | |
|---|
| 1707 | 2179 | return 0; |
|---|
| 1708 | 2180 | |
|---|
| 2181 | +remove_rst_wq: |
|---|
| 2182 | + destroy_workqueue(rk_pcie->hot_rst_wq); |
|---|
| 1709 | 2183 | remove_irq_domain: |
|---|
| 1710 | 2184 | if (rk_pcie->irq_domain) |
|---|
| 1711 | 2185 | irq_domain_remove(rk_pcie->irq_domain); |
|---|
| 2186 | +disable_phy: |
|---|
| 2187 | + phy_power_off(rk_pcie->phy); |
|---|
| 2188 | + phy_exit(rk_pcie->phy); |
|---|
| 1712 | 2189 | deinit_clk: |
|---|
| 1713 | | - rk_pcie_clk_deinit(rk_pcie); |
|---|
| 2190 | + clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks); |
|---|
| 1714 | 2191 | disable_vpcie3v3: |
|---|
| 1715 | 2192 | rk_pcie_disable_power(rk_pcie); |
|---|
| 2193 | +release_driver: |
|---|
| 2194 | + if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT)) |
|---|
| 2195 | + device_release_driver(dev); |
|---|
| 1716 | 2196 | |
|---|
| 1717 | 2197 | return ret; |
|---|
| 1718 | 2198 | } |
|---|
| 1719 | 2199 | |
|---|
| 1720 | 2200 | static int rk_pcie_probe(struct platform_device *pdev) |
|---|
| 1721 | 2201 | { |
|---|
| 1722 | | - struct task_struct *tsk; |
|---|
| 2202 | + if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT)) { |
|---|
| 2203 | + struct task_struct *tsk; |
|---|
| 1723 | 2204 | |
|---|
| 1724 | | - tsk = kthread_run(rk_pcie_really_probe, pdev, "rk-pcie"); |
|---|
| 1725 | | - if (IS_ERR(tsk)) { |
|---|
| 1726 | | - dev_err(&pdev->dev, "start rk-pcie thread failed\n"); |
|---|
| 1727 | | - return PTR_ERR(tsk); |
|---|
| 2205 | + tsk = kthread_run(rk_pcie_really_probe, pdev, "rk-pcie"); |
|---|
| 2206 | + if (IS_ERR(tsk)) { |
|---|
| 2207 | + dev_err(&pdev->dev, "start rk-pcie thread failed\n"); |
|---|
| 2208 | + return PTR_ERR(tsk); |
|---|
| 2209 | + } |
|---|
| 2210 | + |
|---|
| 2211 | + return 0; |
|---|
| 1728 | 2212 | } |
|---|
| 1729 | | - return 0; |
|---|
| 2213 | + |
|---|
| 2214 | + return rk_pcie_really_probe(pdev); |
|---|
| 1730 | 2215 | } |
|---|
| 2216 | + |
|---|
#ifdef CONFIG_PCIEASPM
/*
 * Save/restore ASPM and L1 substate configuration across system sleep for
 * the root port and the slot-0 devices on the bus directly below it, and
 * force those devices into D0.
 *
 * @enable: true  - restore the previously saved L1SS_CTL1 and LNKCTL ASPM
 *                  settings on both ends of the link (resume path).
 *          false - save the current settings into rk_pcie->l1ss_ctl1 /
 *                  rk_pcie->aspm, then disable ASPM (suspend path).
 */
static void rk_pcie_downstream_dev_to_d0(struct rk_pcie *rk_pcie, bool enable)
{
	struct pcie_port *pp = &rk_pcie->pci->pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev, *bridge;
	u32 val;

	/* Find the root bus, i.e. the child bus directly below the host bridge */
	list_for_each_entry(child, &pp->bridge->bus->children, node) {
		if (child->parent == pp->bridge->bus) {
			root_bus = child;
			bridge = root_bus->self;
			break;
		}
	}

	if (!root_bus) {
		dev_err(rk_pcie->pci->dev, "Failed to find downstream devices\n");
		return;
	}

	/* Save or restore the root port's ASPM/L1SS state */
	if (enable) {
		if (rk_pcie->l1ss_ctl1)
			dw_pcie_writel_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1, rk_pcie->l1ss_ctl1);

		/* rk_pcie->aspm was saved in advance when enable was false */
		dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, rk_pcie->aspm);
	} else {
		val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1);
		if (val & PCI_L1SS_CTL1_L1SS_MASK)
			rk_pcie->l1ss_ctl1 = val;
		else
			rk_pcie->l1ss_ctl1 = 0;

		val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL);
		rk_pcie->aspm = val & PCI_EXP_LNKCTL_ASPMC;
		/*
		 * Clear the ASPM Control field (bits 1:0) of Link Control.
		 * NOTE: the previous code masked with PCI_EXP_LNKCAP_ASPM_L1 |
		 * PCI_EXP_LNKCAP_ASPM_L0S, which are Link *Capabilities*
		 * register bits (10:11) and therefore cleared the wrong bits
		 * here while leaving ASPM enabled.
		 */
		val &= ~PCI_EXP_LNKCTL_ASPMC;
		dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, val);
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			/* Power up before touching config space; message fixed to say D0 */
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(rk_pcie->pci->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
			if (enable) {
				if (rk_pcie->l1ss_ctl1) {
					pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, &val);
					val &= ~PCI_L1SS_CTL1_L1SS_MASK;
					val |= (rk_pcie->l1ss_ctl1 & PCI_L1SS_CTL1_L1SS_MASK);
					pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, val);
				}

				pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
								   PCI_EXP_LNKCTL_ASPMC, rk_pcie->aspm);
			} else {
				pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
			}
		}
	}
}
#endif
|---|
| 1731 | 2282 | |
|---|
| 1732 | 2283 | static int __maybe_unused rockchip_dw_pcie_suspend(struct device *dev) |
|---|
| 1733 | 2284 | { |
|---|
| 1734 | 2285 | struct rk_pcie *rk_pcie = dev_get_drvdata(dev); |
|---|
| 1735 | | - int ret; |
|---|
| 2286 | + int ret = 0, power; |
|---|
| 2287 | + struct dw_pcie *pci = rk_pcie->pci; |
|---|
| 2288 | + u32 status; |
|---|
| 2289 | + |
|---|
| 2290 | + /* |
|---|
| 2291 | + * This is as per PCI Express Base r5.0 r1.0 May 22-2019, |
|---|
| 2292 | + * 5.2 Link State Power Management (Page #440). |
|---|
| 2293 | + * |
|---|
| 2294 | + * L2/L3 Ready entry negotiations happen while in the L0 state. |
|---|
| 2295 | + * L2/L3 Ready are entered only after the negotiation completes. |
|---|
| 2296 | + * |
|---|
| 2297 | + * The following example sequence illustrates the multi-step Link state |
|---|
| 2298 | + * transition process leading up to entering a system sleep state: |
|---|
| 2299 | + * 1. System software directs all Functions of a Downstream component to D3Hot. |
|---|
| 2300 | + * 2. The Downstream component then initiates the transition of the Link to L1 |
|---|
| 2301 | + * as required. |
|---|
| 2302 | + * 3. System software then causes the Root Complex to broadcast the PME_Turn_Off |
|---|
| 2303 | + * Message in preparation for removing the main power source. |
|---|
| 2304 | + * 4. This Message causes the subject Link to transition back to L0 in order to |
|---|
| 2305 | + * send it and to enable the Downstream component to respond with PME_TO_Ack. |
|---|
| 2306 | + * 5. After sending the PME_TO_Ack, the Downstream component initiates the L2/L3 |
|---|
| 2307 | + * Ready transition protocol. |
|---|
| 2308 | + */ |
|---|
| 2309 | + |
|---|
| 2310 | + /* 1. All sub-devices are in D3hot by PCIe stack */ |
|---|
| 2311 | + dw_pcie_dbi_ro_wr_dis(rk_pcie->pci); |
|---|
| 1736 | 2312 | |
|---|
| 1737 | 2313 | rk_pcie_link_status_clear(rk_pcie); |
|---|
| 2314 | + |
|---|
| 2315 | + /* |
|---|
| 2316 | + * Wlan devices will be shutdown from function driver now, so doing L2 here |
|---|
| 2317 | + * must fail. Skip L2 routine. |
|---|
| 2318 | + */ |
|---|
| 2319 | + if (rk_pcie->skip_scan_in_resume) { |
|---|
| 2320 | + rfkill_get_wifi_power_state(&power); |
|---|
| 2321 | + if (!power) |
|---|
| 2322 | + goto no_l2; |
|---|
| 2323 | + } |
|---|
| 2324 | + |
|---|
| 2325 | + /* 2. Broadcast PME_Turn_Off Message */ |
|---|
| 2326 | + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_MSG_GEN, PME_TURN_OFF); |
|---|
| 2327 | + ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_MSG_GEN, |
|---|
| 2328 | + status, !(status & BIT(4)), 20, RK_PCIE_L2_TMOUT_US); |
|---|
| 2329 | + if (ret) { |
|---|
| 2330 | + dev_err(dev, "Failed to send PME_Turn_Off\n"); |
|---|
| 2331 | + goto no_l2; |
|---|
| 2332 | + } |
|---|
| 2333 | + |
|---|
| 2334 | + /* 3. Wait for PME_TO_Ack */ |
|---|
| 2335 | + ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_INTR_STATUS_MSG_RX, |
|---|
| 2336 | + status, status & BIT(9), 20, RK_PCIE_L2_TMOUT_US); |
|---|
| 2337 | + if (ret) { |
|---|
| 2338 | + dev_err(dev, "Failed to receive PME_TO_Ack\n"); |
|---|
| 2339 | + goto no_l2; |
|---|
| 2340 | + } |
|---|
| 2341 | + |
|---|
| 2342 | + /* 4. Clear PME_TO_Ack and Wait for ready to enter L23 message */ |
|---|
| 2343 | + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MSG_RX, PME_TO_ACK); |
|---|
| 2344 | + ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_POWER, |
|---|
| 2345 | + status, status & READY_ENTER_L23, 20, RK_PCIE_L2_TMOUT_US); |
|---|
| 2346 | + if (ret) { |
|---|
| 2347 | + dev_err(dev, "Failed to ready to enter L23\n"); |
|---|
| 2348 | + goto no_l2; |
|---|
| 2349 | + } |
|---|
| 2350 | + |
|---|
| 2351 | + /* 5. Check we are in L2 */ |
|---|
| 2352 | + ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_LTSSM_STATUS, |
|---|
| 2353 | + status, ((status & S_MAX) == S_L2_IDLE), 20, RK_PCIE_L2_TMOUT_US); |
|---|
| 2354 | + if (ret) |
|---|
| 2355 | + dev_err(pci->dev, "Link isn't in L2 idle!\n"); |
|---|
| 2356 | + |
|---|
| 2357 | +no_l2: |
|---|
| 1738 | 2358 | rk_pcie_disable_ltssm(rk_pcie); |
|---|
| 2359 | + |
|---|
| 2360 | + ret = phy_validate(rk_pcie->phy, PHY_TYPE_PCIE, 0, NULL); |
|---|
| 2361 | + if (ret && ret != -EOPNOTSUPP) { |
|---|
| 2362 | + dev_err(dev, "PHY is reused by other controller, check the dts!\n"); |
|---|
| 2363 | + return ret; |
|---|
| 2364 | + } |
|---|
| 1739 | 2365 | |
|---|
| 1740 | 2366 | /* make sure assert phy success */ |
|---|
| 1741 | 2367 | usleep_range(200, 300); |
|---|
| .. | .. |
|---|
| 1743 | 2369 | phy_power_off(rk_pcie->phy); |
|---|
| 1744 | 2370 | phy_exit(rk_pcie->phy); |
|---|
| 1745 | 2371 | |
|---|
| 1746 | | - clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks); |
|---|
| 2372 | + rk_pcie->intx = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY); |
|---|
| 2373 | + |
|---|
| 2374 | + clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks); |
|---|
| 1747 | 2375 | |
|---|
| 1748 | 2376 | rk_pcie->in_suspend = true; |
|---|
| 1749 | 2377 | |
|---|
| 1750 | 2378 | gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0); |
|---|
| 1751 | 2379 | ret = rk_pcie_disable_power(rk_pcie); |
|---|
| 1752 | | - |
|---|
| 1753 | | - if (rk_pcie->pci->pp.msi_irq > 0) |
|---|
| 1754 | | - dw_pcie_free_msi(&rk_pcie->pci->pp); |
|---|
| 1755 | 2380 | |
|---|
| 1756 | 2381 | return ret; |
|---|
| 1757 | 2382 | } |
|---|
| .. | .. |
|---|
| 1762 | 2387 | bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj; |
|---|
| 1763 | 2388 | int ret; |
|---|
| 1764 | 2389 | |
|---|
| 2390 | + reset_control_assert(rk_pcie->rsts); |
|---|
| 2391 | + udelay(10); |
|---|
| 2392 | + reset_control_deassert(rk_pcie->rsts); |
|---|
| 2393 | + |
|---|
| 1765 | 2394 | ret = rk_pcie_enable_power(rk_pcie); |
|---|
| 1766 | 2395 | if (ret) |
|---|
| 1767 | 2396 | return ret; |
|---|
| 1768 | 2397 | |
|---|
| 1769 | | - ret = clk_bulk_enable(rk_pcie->clk_cnt, rk_pcie->clks); |
|---|
| 2398 | + ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks); |
|---|
| 1770 | 2399 | if (ret) { |
|---|
| 1771 | | - clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks); |
|---|
| 2400 | + dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret); |
|---|
| 1772 | 2401 | return ret; |
|---|
| 1773 | 2402 | } |
|---|
| 1774 | 2403 | |
|---|
| 1775 | | - ret = phy_set_mode(rk_pcie->phy, rk_pcie->phy_mode); |
|---|
| 2404 | + ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode, |
|---|
| 2405 | + rk_pcie->phy_sub_mode); |
|---|
| 1776 | 2406 | if (ret) { |
|---|
| 1777 | 2407 | dev_err(dev, "fail to set phy to mode %s, err %d\n", |
|---|
| 1778 | | - (rk_pcie->phy_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP", |
|---|
| 2408 | + (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP", |
|---|
| 1779 | 2409 | ret); |
|---|
| 1780 | 2410 | return ret; |
|---|
| 1781 | 2411 | } |
|---|
| .. | .. |
|---|
| 1805 | 2435 | if (std_rc) |
|---|
| 1806 | 2436 | dw_pcie_setup_rc(&rk_pcie->pci->pp); |
|---|
| 1807 | 2437 | |
|---|
| 2438 | + rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY, |
|---|
| 2439 | + rk_pcie->intx | 0xffff0000); |
|---|
| 2440 | + |
|---|
| 1808 | 2441 | ret = rk_pcie_establish_link(rk_pcie->pci); |
|---|
| 1809 | 2442 | if (ret) { |
|---|
| 1810 | 2443 | dev_err(dev, "failed to establish pcie link\n"); |
|---|
| 1811 | 2444 | goto err; |
|---|
| 1812 | 2445 | } |
|---|
| 1813 | | - |
|---|
| 1814 | | - if (rk_pcie->pci->pp.msi_irq > 0) |
|---|
| 1815 | | - dw_pcie_msi_init(&rk_pcie->pci->pp); |
|---|
| 1816 | 2446 | |
|---|
| 1817 | 2447 | if (std_rc) |
|---|
| 1818 | 2448 | goto std_rc_done; |
|---|
| .. | .. |
|---|
| 1836 | 2466 | goto err; |
|---|
| 1837 | 2467 | } |
|---|
| 1838 | 2468 | |
|---|
| 2469 | + if (rk_pcie->pci->pp.msi_irq > 0) |
|---|
| 2470 | + dw_pcie_msi_init(&rk_pcie->pci->pp); |
|---|
| 2471 | + |
|---|
| 1839 | 2472 | return 0; |
|---|
| 1840 | 2473 | err: |
|---|
| 1841 | 2474 | rk_pcie_disable_power(rk_pcie); |
|---|
| .. | .. |
|---|
| 1843 | 2476 | return ret; |
|---|
| 1844 | 2477 | } |
|---|
| 1845 | 2478 | |
|---|
#ifdef CONFIG_PCIEASPM
/*
 * PM .prepare hook: before suspend, open the DBI for read/write, save the
 * current downstream ASPM/L1SS state and disable ASPM, then re-lock the DBI.
 */
static int rockchip_dw_pcie_prepare(struct device *dev)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
	struct dw_pcie *pci = rk_pcie->pci;

	dw_pcie_dbi_ro_wr_en(pci);
	rk_pcie_downstream_dev_to_d0(rk_pcie, false);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

/*
 * PM .complete hook: after resume, restore the ASPM/L1SS state that
 * .prepare saved, again bracketing the update with DBI write enable/disable.
 */
static void rockchip_dw_pcie_complete(struct device *dev)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
	struct dw_pcie *pci = rk_pcie->pci;

	dw_pcie_dbi_ro_wr_en(pci);
	rk_pcie_downstream_dev_to_d0(rk_pcie, true);
	dw_pcie_dbi_ro_wr_dis(pci);
}
#endif
|---|
| 2500 | + |
|---|
| 1846 | 2501 | static const struct dev_pm_ops rockchip_dw_pcie_pm_ops = { |
|---|
| 2502 | +#ifdef CONFIG_PCIEASPM |
|---|
| 2503 | + .prepare = rockchip_dw_pcie_prepare, |
|---|
| 2504 | + .complete = rockchip_dw_pcie_complete, |
|---|
| 2505 | +#endif |
|---|
| 1847 | 2506 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_dw_pcie_suspend, |
|---|
| 1848 | 2507 | rockchip_dw_pcie_resume) |
|---|
| 1849 | 2508 | }; |
|---|
| .. | .. |
|---|
| 1853 | 2512 | .name = "rk-pcie", |
|---|
| 1854 | 2513 | .of_match_table = rk_pcie_of_match, |
|---|
| 1855 | 2514 | .suppress_bind_attrs = true, |
|---|
| 2515 | + .pm = &rockchip_dw_pcie_pm_ops, |
|---|
| 1856 | 2516 | }, |
|---|
| 2517 | + .probe = rk_pcie_probe, |
|---|
| 1857 | 2518 | }; |
|---|
| 1858 | 2519 | |
|---|
| 1859 | | -module_platform_driver_probe(rk_plat_pcie_driver, rk_pcie_probe); |
|---|
| 2520 | +module_platform_driver(rk_plat_pcie_driver); |
|---|
| 1860 | 2521 | |
|---|
| 1861 | 2522 | MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com>"); |
|---|
| 1862 | 2523 | MODULE_DESCRIPTION("RockChip PCIe Controller driver"); |
|---|