[...]
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/init.h>
+#include <linux/phy/phy.h>
 #include <linux/platform_device.h>
+#include <linux/msi.h>
 #include <linux/of_address.h>
 #include <linux/of_gpio.h>
 #include <linux/of_pci.h>

 #include "../pci.h"
+#include "../pci-bridge-emul.h"

 /* PCIe core registers */
+#define PCIE_CORE_DEV_ID_REG 0x0
 #define PCIE_CORE_CMD_STATUS_REG 0x4
-#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
-#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
-#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
+#define PCIE_CORE_DEV_REV_REG 0x8
 #define PCIE_CORE_PCIEXP_CAP 0xc0
 #define PCIE_CORE_ERR_CAPCTL_REG 0x118
 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)
-
+#define PCIE_CORE_INT_A_ASSERT_ENABLE 1
+#define PCIE_CORE_INT_B_ASSERT_ENABLE 2
+#define PCIE_CORE_INT_C_ASSERT_ENABLE 3
+#define PCIE_CORE_INT_D_ASSERT_ENABLE 4
 /* PIO registers base address and register offsets */
 #define PIO_BASE_ADDR 0x4000
 #define PIO_CTRL (PIO_BASE_ADDR + 0x0)
[...]
 #define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
 #define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
 #define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
+#define PCIE_CORE_REF_CLK_REG (CONTROL_BASE_ADDR + 0x14)
+#define PCIE_CORE_REF_CLK_TX_ENABLE BIT(1)
+#define PCIE_CORE_REF_CLK_RX_ENABLE BIT(2)
+#define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30)
 #define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
+#define PCIE_MSG_PM_PME_MASK BIT(7)
 #define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
 #define PCIE_ISR0_MSI_INT_PENDING BIT(24)
 #define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
[...]
 #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
 #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
 #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
+#define PCIE_MSI_ALL_MASK GENMASK(31, 0)
 #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
 #define PCIE_MSI_DATA_MASK GENMASK(15, 0)

[...]
 #define OB_WIN_BLOCK_SIZE 0x20
 #define OB_WIN_COUNT 8
 #define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
-					OB_WIN_BLOCK_SIZE * (win) + \
-					(offset))
+					OB_WIN_BLOCK_SIZE * (win) + \
+					(offset))
 #define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
 #define OB_WIN_ENABLE BIT(0)
 #define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
[...]
 	LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b,
 };

+#define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44)
+
 /* PCIe core controller registers */
 #define CTRL_CORE_BASE_ADDR 0x18000
 #define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0)
[...]
 #define LINK_WAIT_MAX_RETRIES 10
 #define LINK_WAIT_USLEEP_MIN 90000
 #define LINK_WAIT_USLEEP_MAX 100000
+#define RETRAIN_WAIT_MAX_RETRIES 10
+#define RETRAIN_WAIT_USLEEP_US 2000

 #define MSI_IRQ_NUM 32
+
+#define CFG_RD_CRS_VAL 0xffff0001

 struct advk_pcie {
 	struct platform_device *pdev;
 	void __iomem *base;
-	struct list_head resources;
 	struct {
 		phys_addr_t match;
 		phys_addr_t remap;
[...]
 	DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
 	struct mutex msi_used_lock;
 	u16 msi_msg;
-	int root_bus_nr;
 	int link_gen;
+	struct pci_bridge_emul bridge;
 	struct gpio_desc *reset_gpio;
+	struct phy *phy;
 };

 static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
[...]
 	return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
 }

+static inline bool advk_pcie_link_active(struct advk_pcie *pcie)
+{
+	/*
+	 * According to PCIe Base specification 3.0, Table 4-14: Link
+	 * Status Mapped to the LTSSM, and 4.2.6.3.6 Configuration.Idle
+	 * is Link Up mapped to LTSSM Configuration.Idle, Recovery, L0,
+	 * L0s, L1 and L2 states. And according to 3.2.1. Data Link
+	 * Control and Management State Machine Rules is DL Up status
+	 * reported in DL Active state.
+	 */
+	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+	return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED;
+}
+
 static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
 {
 	/*
-	 * According to PCIe Base specification 3.0, Table 4-14: Link
-	 * Status Mapped to the LTSSM is Link Training mapped to LTSSM
-	 * Configuration and Recovery states.
-	 */
+	 * According to PCIe Base specification 3.0, Table 4-14: Link
+	 * Status Mapped to the LTSSM is Link Training mapped to LTSSM
+	 * Configuration and Recovery states.
+	 */
 	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
 	return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
-		 ltssm_state < LTSSM_L0) ||
+		 ltssm_state < LTSSM_L0) ||
 		(ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
-		 ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
+		 ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
 }

 static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
[...]
 	}

 	return -ETIMEDOUT;
+}
+
+static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
+{
+	size_t retries;
+
+	for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
+		if (advk_pcie_link_training(pcie))
+			break;
+		udelay(RETRAIN_WAIT_USLEEP_US);
+	}
 }

 static void advk_pcie_issue_perst(struct advk_pcie *pcie)
[...]
 	u32 reg;
 	int i;

+	/*
+	 * Configure PCIe Reference clock. Direction is from the PCIe
+	 * controller to the endpoint card, so enable transmitting of
+	 * Reference clock differential signal off-chip and disable
+	 * receiving off-chip differential signal.
+	 */
+	reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
+	reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
+	reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE;
+	advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);
+
 	/* Set to Direct mode */
 	reg = advk_readl(pcie, CTRL_CONFIG_REG);
 	reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
[...]
 	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
 	reg |= (IS_RC_MSK << IS_RC_SHIFT);
 	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+	/*
+	 * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab.
+	 * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
+	 * id in high 16 bits. Updating this register changes readback value of
+	 * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
+	 * for erratum 4.1: "The value of device and vendor ID is incorrect".
+	 */
+	reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
+	advk_writel(pcie, reg, VENDOR_ID_REG);
+
+	/*
+	 * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
+	 * because the default value is Mass storage controller (0x010400).
+	 *
+	 * Note that this Aardvark PCI Bridge does not have compliant Type 1
+	 * Configuration Space and it even cannot be accessed via Aardvark's
+	 * PCI config space access method. Something like config space is
+	 * available in internal Aardvark registers starting at offset 0x0
+	 * and is reported as Type 0. In range 0x10 - 0x34 it has totally
+	 * different registers.
+	 *
+	 * Therefore driver uses emulation of PCI Bridge which emulates
+	 * access to configuration space via internal Aardvark registers or
+	 * emulated configuration buffer.
+	 */
+	reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
+	reg &= ~0xffffff00;
+	reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
+	advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);
+
+	/* Disable Root Bridge I/O space, memory space and bus mastering */
+	reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+	reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+	advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);

 	/* Set Advanced Error Capabilities and Control PF0 register */
 	reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
[...]
 	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

 	/* Clear all interrupts */
+	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
 	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
 	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
 	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
[...]

 	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);

-	/* Unmask all MSI's */
-	advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
+	/* Unmask all MSIs */
+	advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);

 	/* Enable summary interrupt for GIC SPI source */
 	reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
[...]
 		advk_pcie_disable_ob_win(pcie, i);

 	advk_pcie_train_link(pcie);
-
-	reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
-	reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
-		PCIE_CORE_CMD_IO_ACCESS_EN |
-		PCIE_CORE_CMD_MEM_IO_REQ_EN;
-	advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
 }

-static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
 {
 	struct device *dev = &pcie->pdev->dev;
 	u32 reg;
 	unsigned int status;
 	char *strcomp_status, *str_posted;
+	int ret;

 	reg = advk_readl(pcie, PIO_STAT);
 	status = (reg & PIO_COMPLETION_STATUS_MASK) >>
[...]
 	case PIO_COMPLETION_STATUS_OK:
 		if (reg & PIO_ERR_STATUS) {
 			strcomp_status = "COMP_ERR";
+			ret = -EFAULT;
 			break;
 		}
 		/* Get the read result */
[...]
 			*val = advk_readl(pcie, PIO_RD_DATA);
 		/* No error */
 		strcomp_status = NULL;
+		ret = 0;
 		break;
 	case PIO_COMPLETION_STATUS_UR:
 		strcomp_status = "UR";
+		ret = -EOPNOTSUPP;
 		break;
 	case PIO_COMPLETION_STATUS_CRS:
+		if (allow_crs && val) {
+			/* PCIe r4.0, sec 2.3.2, says:
+			 * If CRS Software Visibility is enabled:
+			 * For a Configuration Read Request that includes both
+			 * bytes of the Vendor ID field of a device Function's
+			 * Configuration Space Header, the Root Complex must
+			 * complete the Request to the host by returning a
+			 * read-data value of 0001h for the Vendor ID field and
+			 * all '1's for any additional bytes included in the
+			 * request.
+			 *
+			 * So CRS in this case is not an error status.
+			 */
+			*val = CFG_RD_CRS_VAL;
+			strcomp_status = NULL;
+			ret = 0;
+			break;
+		}
 		/* PCIe r4.0, sec 2.3.2, says:
 		 * If CRS Software Visibility is not enabled, the Root Complex
 		 * must re-issue the Configuration Request as a new Request.
+		 * If CRS Software Visibility is enabled: For a Configuration
+		 * Write Request or for any other Configuration Read Request,
+		 * the Root Complex must re-issue the Configuration Request as
+		 * a new Request.
 		 * A Root Complex implementation may choose to limit the number
 		 * of Configuration Request/CRS Completion Status loops before
 		 * determining that something is wrong with the target of the
 		 * Request and taking appropriate action, e.g., complete the
 		 * Request to the host as a failed transaction.
 		 *
-		 * To simplify implementation do not re-issue the Configuration
-		 * Request and complete the Request as a failed transaction.
+		 * So return -EAGAIN and caller (pci-aardvark.c driver) will
+		 * re-issue request again up to the PIO_RETRY_CNT retries.
 		 */
 		strcomp_status = "CRS";
+		ret = -EAGAIN;
 		break;
 	case PIO_COMPLETION_STATUS_CA:
 		strcomp_status = "CA";
+		ret = -ECANCELED;
 		break;
 	default:
 		strcomp_status = "Unknown";
+		ret = -EINVAL;
 		break;
 	}

 	if (!strcomp_status)
-		return 0;
+		return ret;

 	if (reg & PIO_NON_POSTED_REQ)
 		str_posted = "Non-posted";
[...]
 	dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
 		str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));

-	return -EFAULT;
+	return ret;
 }

 static int advk_pcie_wait_pio(struct advk_pcie *pcie)
[...]
 	struct device *dev = &pcie->pdev->dev;
 	int i;

-	for (i = 0; i < PIO_RETRY_CNT; i++) {
+	for (i = 1; i <= PIO_RETRY_CNT; i++) {
 		u32 start, isr;

 		start = advk_readl(pcie, PIO_START);
 		isr = advk_readl(pcie, PIO_ISR);
 		if (!start && isr)
-			return 0;
+			return i;
 		udelay(PIO_RETRY_DELAY);
 	}

[...]
 	return -ETIMEDOUT;
 }

+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
+				    int reg, u32 *value)
+{
+	struct advk_pcie *pcie = bridge->data;
+
+	switch (reg) {
+	case PCI_COMMAND:
+		*value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+		return PCI_BRIDGE_EMUL_HANDLED;
+
+	case PCI_INTERRUPT_LINE: {
+		/*
+		 * From the whole 32bit register we support reading from HW only
+		 * one bit: PCI_BRIDGE_CTL_BUS_RESET.
+		 * Other bits are retrieved only from emulated config buffer.
+		 */
+		__le32 *cfgspace = (__le32 *)&bridge->conf;
+		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
+		if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
+			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
+		else
+			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
+		*value = val;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
+	default:
+		return PCI_BRIDGE_EMUL_NOT_HANDLED;
+	}
+}
+
+static void
+advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
+				     int reg, u32 old, u32 new, u32 mask)
+{
+	struct advk_pcie *pcie = bridge->data;
+
+	switch (reg) {
+	case PCI_COMMAND:
+		advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
+		break;
+
+	case PCI_INTERRUPT_LINE:
+		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
+			u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
+			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
+				val |= HOT_RESET_GEN;
+			else
+				val &= ~HOT_RESET_GEN;
+			advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+				    int reg, u32 *value)
+{
+	struct advk_pcie *pcie = bridge->data;
+
+
+	switch (reg) {
+	case PCI_EXP_SLTCTL:
+		*value = PCI_EXP_SLTSTA_PDS << 16;
+		return PCI_BRIDGE_EMUL_HANDLED;
+
+	case PCI_EXP_RTCTL: {
+		u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+		*value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
+		*value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE;
+		*value |= PCI_EXP_RTCAP_CRSVIS << 16;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
+	case PCI_EXP_RTSTA: {
+		u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
+		u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
+		*value = msglog >> 16;
+		if (isr0 & PCIE_MSG_PM_PME_MASK)
+			*value |= PCI_EXP_RTSTA_PME;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
+	case PCI_EXP_LNKCAP: {
+		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+		/*
+		 * PCI_EXP_LNKCAP_DLLLARC bit is hardwired in aardvark HW to 0.
+		 * But support for PCI_EXP_LNKSTA_DLLLA is emulated via ltssm
+		 * state so explicitly enable PCI_EXP_LNKCAP_DLLLARC flag.
+		 */
+		val |= PCI_EXP_LNKCAP_DLLLARC;
+		*value = val;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
+	case PCI_EXP_LNKCTL: {
+		/* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
+		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
+			~(PCI_EXP_LNKSTA_LT << 16);
+		if (advk_pcie_link_training(pcie))
+			val |= (PCI_EXP_LNKSTA_LT << 16);
+		if (advk_pcie_link_active(pcie))
+			val |= (PCI_EXP_LNKSTA_DLLLA << 16);
+		*value = val;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
+	case PCI_EXP_DEVCAP:
+	case PCI_EXP_DEVCTL:
+		*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+		return PCI_BRIDGE_EMUL_HANDLED;
+	default:
+		return PCI_BRIDGE_EMUL_NOT_HANDLED;
+	}
+
+}
+
+static void
+advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+				     int reg, u32 old, u32 new, u32 mask)
+{
+	struct advk_pcie *pcie = bridge->data;
+
+	switch (reg) {
+	case PCI_EXP_DEVCTL:
+		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+		break;
+
+	case PCI_EXP_LNKCTL:
+		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+		if (new & PCI_EXP_LNKCTL_RL)
+			advk_pcie_wait_for_retrain(pcie);
+		break;
+
+	case PCI_EXP_RTCTL: {
+		/* Only mask/unmask PME interrupt */
+		u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
+			~PCIE_MSG_PM_PME_MASK;
+		if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
+			val |= PCIE_MSG_PM_PME_MASK;
+		advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
+		break;
+	}
+
+	case PCI_EXP_RTSTA:
+		new = (new & PCI_EXP_RTSTA_PME) >> 9;
+		advk_writel(pcie, new, PCIE_ISR0_REG);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+	.read_base = advk_pci_bridge_emul_base_conf_read,
+	.write_base = advk_pci_bridge_emul_base_conf_write,
+	.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
+	.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
+};
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+{
+	struct pci_bridge_emul *bridge = &pcie->bridge;
+
+	bridge->conf.vendor =
+		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
+	bridge->conf.device =
+		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
+	bridge->conf.class_revision =
+		cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);
+
+	/* Support 32 bits I/O addressing */
+	bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
+	bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
+
+	/* Support 64 bits memory pref */
+	bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
+	bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
+
+	/* Support interrupt A for MSI feature */
+	bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+
+	/* Aardvark HW provides PCIe Capability structure in version 2 */
+	bridge->pcie_conf.cap = cpu_to_le16(2);
+
+	/* Indicates supports for Completion Retry Status */
+	bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+
+	bridge->has_pcie = true;
+	bridge->data = pcie;
+	bridge->ops = &advk_pci_bridge_emul_ops;
+
+	return pci_bridge_emul_init(bridge, 0);
+}
+
 static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
 				   int devfn)
 {
-	if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
+	if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0)
 		return false;

 	/*
 	 * If the link goes down after we check for link-up, nothing bad
 	 * happens but the config access times out.
 	 */
-	if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie))
+	if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
 		return false;

 	return true;
[...]
 			    int where, int size, u32 *val)
 {
 	struct advk_pcie *pcie = bus->sysdata;
+	int retry_count;
+	bool allow_crs;
 	u32 reg;
 	int ret;

[...]
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	}

-	if (advk_pcie_pio_is_running(pcie)) {
-		*val = 0xffffffff;
-		return PCIBIOS_SET_FAILED;
-	}
+	if (pci_is_root_bus(bus))
+		return pci_bridge_emul_conf_read(&pcie->bridge, where,
+						 size, val);
+
+	/*
+	 * Completion Retry Status is possible to return only when reading all
+	 * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
+	 * CRSSVE flag on Root Bridge is enabled.
+	 */
+	allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+		    (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
+		     PCI_EXP_RTCTL_CRSSVE);
+
+	if (advk_pcie_pio_is_running(pcie))
+		goto try_crs;

 	/* Program the control register */
 	reg = advk_readl(pcie, PIO_CTRL);
 	reg &= ~PIO_CTRL_TYPE_MASK;
-	if (bus->number == pcie->root_bus_nr)
+	if (pci_is_root_bus(bus->parent))
 		reg |= PCIE_CONFIG_RD_TYPE0;
 	else
 		reg |= PCIE_CONFIG_RD_TYPE1;
[...]
 	/* Program the data strobe */
 	advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);

-	/* Clear PIO DONE ISR and start the transfer */
-	advk_writel(pcie, 1, PIO_ISR);
-	advk_writel(pcie, 1, PIO_START);
+	retry_count = 0;
+	do {
+		/* Clear PIO DONE ISR and start the transfer */
+		advk_writel(pcie, 1, PIO_ISR);
+		advk_writel(pcie, 1, PIO_START);

-	ret = advk_pcie_wait_pio(pcie);
-	if (ret < 0) {
-		*val = 0xffffffff;
-		return PCIBIOS_SET_FAILED;
-	}
+		ret = advk_pcie_wait_pio(pcie);
+		if (ret < 0)
+			goto try_crs;

-	/* Check PIO status and get the read result */
-	ret = advk_pcie_check_pio_status(pcie, val);
-	if (ret < 0) {
-		*val = 0xffffffff;
-		return PCIBIOS_SET_FAILED;
-	}
+		retry_count += ret;
+
+		/* Check PIO status and get the read result */
+		ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+	} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
+
+	if (ret < 0)
+		goto fail;

 	if (size == 1)
 		*val = (*val >> (8 * (where & 3))) & 0xff;
[...]
 		*val = (*val >> (8 * (where & 3))) & 0xffff;

 	return PCIBIOS_SUCCESSFUL;
+
+try_crs:
+	/*
+	 * If it is possible, return Completion Retry Status so that caller
+	 * tries to issue the request again instead of failing.
+	 */
+	if (allow_crs) {
+		*val = CFG_RD_CRS_VAL;
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+fail:
+	*val = 0xffffffff;
+	return PCIBIOS_SET_FAILED;
 }

 static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
[...]
 	struct advk_pcie *pcie = bus->sysdata;
 	u32 reg;
 	u32 data_strobe = 0x0;
+	int retry_count;
 	int offset;
 	int ret;

 	if (!advk_pcie_valid_device(pcie, bus, devfn))
 		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (pci_is_root_bus(bus))
+		return pci_bridge_emul_conf_write(&pcie->bridge, where,
+						  size, val);

 	if (where % size)
 		return PCIBIOS_SET_FAILED;
[...]
 	/* Program the control register */
 	reg = advk_readl(pcie, PIO_CTRL);
 	reg &= ~PIO_CTRL_TYPE_MASK;
-	if (bus->number == pcie->root_bus_nr)
+	if (pci_is_root_bus(bus->parent))
 		reg |= PCIE_CONFIG_WR_TYPE0;
 	else
 		reg |= PCIE_CONFIG_WR_TYPE1;
[...]
 	/* Program the data strobe */
 	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

-	/* Clear PIO DONE ISR and start the transfer */
-	advk_writel(pcie, 1, PIO_ISR);
-	advk_writel(pcie, 1, PIO_START);
+	retry_count = 0;
+	do {
+		/* Clear PIO DONE ISR and start the transfer */
+		advk_writel(pcie, 1, PIO_ISR);
+		advk_writel(pcie, 1, PIO_START);

-	ret = advk_pcie_wait_pio(pcie);
-	if (ret < 0)
-		return PCIBIOS_SET_FAILED;
+		ret = advk_pcie_wait_pio(pcie);
+		if (ret < 0)
+			return PCIBIOS_SET_FAILED;

-	ret = advk_pcie_check_pio_status(pcie, NULL);
-	if (ret < 0)
-		return PCIBIOS_SET_FAILED;
+		retry_count += ret;

-	return PCIBIOS_SUCCESSFUL;
+		ret = advk_pcie_check_pio_status(pcie, false, NULL);
+	} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
+
+	return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
 }

 static struct pci_ops advk_pcie_ops = {
[...]

 	msg->address_lo = lower_32_bits(msi_msg);
 	msg->address_hi = upper_32_bits(msi_msg);
-	msg->data = data->irq;
+	msg->data = data->hwirq;
 }

 static int advk_msi_set_affinity(struct irq_data *irq_data,
[...]
 	int hwirq, i;

 	mutex_lock(&pcie->msi_used_lock);
-	hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
-					   0, nr_irqs, 0);
-	if (hwirq >= MSI_IRQ_NUM) {
-		mutex_unlock(&pcie->msi_used_lock);
-		return -ENOSPC;
-	}
-
-	bitmap_set(pcie->msi_used, hwirq, nr_irqs);
+	hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
+					order_base_2(nr_irqs));
 	mutex_unlock(&pcie->msi_used_lock);
+	if (hwirq < 0)
+		return -ENOSPC;

 	for (i = 0; i < nr_irqs; i++)
 		irq_domain_set_info(domain, virq + i, hwirq + i,
[...]
 	struct advk_pcie *pcie = domain->host_data;

 	mutex_lock(&pcie->msi_used_lock);
-	bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
+	bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
 	mutex_unlock(&pcie->msi_used_lock);
 }

[...]
 static void advk_pcie_handle_msi(struct advk_pcie *pcie)
 {
 	u32 msi_val, msi_mask, msi_status, msi_idx;
-	u16 msi_data;
+	int virq;

 	msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
 	msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
-	msi_status = msi_val & ~msi_mask;
+	msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);

 	for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
 		if (!(BIT(msi_idx) & msi_status))
 			continue;

-		/*
-		 * msi_idx contains bits [4:0] of the msi_data and msi_data
-		 * contains 16bit MSI interrupt number
-		 */
 		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
-		msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK;
-		generic_handle_irq(msi_data);
+		virq = irq_find_mapping(pcie->msi_inner_domain, msi_idx);
+		generic_handle_irq(virq);
 	}

 	advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
[...]
 	return IRQ_HANDLED;
 }

-static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
+static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie)
 {
-	int err, res_valid = 0;
-	struct device *dev = &pcie->pdev->dev;
-	struct resource_entry *win, *tmp;
-	resource_size_t iobase;
+	phy_power_off(pcie->phy);
+	phy_exit(pcie->phy);
+}

-	INIT_LIST_HEAD(&pcie->resources);
+static int advk_pcie_enable_phy(struct advk_pcie *pcie)
+{
+	int ret;

-	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
-						    &pcie->resources, &iobase);
-	if (err)
-		return err;
+	if (!pcie->phy)
+		return 0;

-	err = devm_request_pci_bus_resources(dev, &pcie->resources);
-	if (err)
-		goto out_release_res;
+	ret = phy_init(pcie->phy);
+	if (ret)
+		return ret;

-	resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
-		struct resource *res = win->res;
-
-		switch (resource_type(res)) {
-		case IORESOURCE_IO:
-			err = devm_pci_remap_iospace(dev, res, iobase);
-			if (err) {
-				dev_warn(dev, "error %d: failed to map resource %pR\n",
-					 err, res);
-				resource_list_destroy_entry(win);
-			}
-			break;
-		case IORESOURCE_MEM:
-			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
-			break;
-		case IORESOURCE_BUS:
-			pcie->root_bus_nr = res->start;
-			break;
-		}
+	ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
+	if (ret) {
+		phy_exit(pcie->phy);
+		return ret;
 	}

-	if (!res_valid) {
-		dev_err(dev, "non-prefetchable memory resource required\n");
-		err = -EINVAL;
-		goto out_release_res;
+	ret = phy_power_on(pcie->phy);
+	if (ret == -EOPNOTSUPP) {
+		dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n");
+	} else if (ret) {
+		phy_exit(pcie->phy);
+		return ret;
 	}

 	return 0;
+}

-out_release_res:
-	pci_free_resource_list(&pcie->resources);
-	return err;
+static int advk_pcie_setup_phy(struct advk_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct device_node *node = dev->of_node;
+	int ret = 0;
+
+	pcie->phy = devm_of_phy_get(dev, node, NULL);
+	if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
+		return PTR_ERR(pcie->phy);
+
+	/* Old bindings miss the PHY handle */
+	if (IS_ERR(pcie->phy)) {
+		dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
+		pcie->phy = NULL;
+		return 0;
+	}
+
+	ret = advk_pcie_enable_phy(pcie);
+	if (ret)
+		dev_err(dev, "Failed to initialize PHY (%d)\n", ret);
+
+	return ret;
 }

 static int advk_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct advk_pcie *pcie;
-	struct resource *res;
 	struct pci_host_bridge *bridge;
 	struct resource_entry *entry;
 	int ret, irq;
[...]

 	pcie = pci_host_bridge_priv(bridge);
 	pcie->pdev = pdev;
+	platform_set_drvdata(pdev, pcie);

-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pcie->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(pcie->base))
-		return PTR_ERR(pcie->base);
-
-	irq = platform_get_irq(pdev, 0);
-	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
-			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
-			       pcie);
-	if (ret) {
-		dev_err(dev, "Failed to register interrupt\n");
-		return ret;
-	}
-
-	ret = advk_pcie_parse_request_of_pci_ranges(pcie);
-	if (ret) {
-		dev_err(dev, "Failed to parse resources\n");
-		return ret;
-	}
-
-	resource_list_for_each_entry(entry, &pcie->resources) {
+	resource_list_for_each_entry(entry, &bridge->windows) {
 		resource_size_t start = entry->res->start;
 		resource_size_t size = resource_size(entry->res);
 		unsigned long type = resource_type(entry->res);
[...]
 		}
 	}

+	pcie->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(pcie->base))
+		return PTR_ERR(pcie->base);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
+			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
+			       pcie);
+	if (ret) {
+		dev_err(dev, "Failed to register interrupt\n");
+		return ret;
+	}
+
 	pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
 						       "reset-gpios", 0,
 						       GPIOD_OUT_LOW,
[...]
 	else
 		pcie->link_gen = ret;

+	ret = advk_pcie_setup_phy(pcie);
+	if (ret)
+		return ret;
+
 	advk_pcie_setup_hw(pcie);
+
+	ret = advk_sw_pci_bridge_init(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to register emulated root PCI bridge\n");
+		return ret;
+	}

 	ret = advk_pcie_init_irq_domain(pcie);
 	if (ret) {
[...]
 		return ret;
 	}

-	list_splice_init(&pcie->resources, &bridge->windows);
-	bridge->dev.parent = dev;
 	bridge->sysdata = pcie;
-	bridge->busnr = 0;
 	bridge->ops = &advk_pcie_ops;
-	bridge->map_irq = of_irq_parse_and_map_pci;
-	bridge->swizzle_irq = pci_common_swizzle;

 	ret = pci_host_probe(bridge);
 	if (ret < 0) {
[...]
 	return 0;
 }

+static int advk_pcie_remove(struct platform_device *pdev)
+{
+	struct advk_pcie *pcie = platform_get_drvdata(pdev);
+	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+	int i;
+
+	pci_lock_rescan_remove();
+	pci_stop_root_bus(bridge->bus);
+	pci_remove_root_bus(bridge->bus);
+	pci_unlock_rescan_remove();
+
+	advk_pcie_remove_msi_irq_domain(pcie);
+	advk_pcie_remove_irq_domain(pcie);
+
+	/* Disable outbound address windows mapping */
+	for (i = 0; i < OB_WIN_COUNT; i++)
+		advk_pcie_disable_ob_win(pcie, i);
+
+	return 0;
+}
+
 static const struct of_device_id advk_pcie_of_match_table[] = {
 	{ .compatible = "marvell,armada-3700-pcie", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);

 static struct platform_driver advk_pcie_driver = {
 	.driver = {
 		.name = "advk-pcie",
 		.of_match_table = advk_pcie_of_match_table,
-		/* Driver unloading/unbinding currently not supported */
-		.suppress_bind_attrs = true,
 	},
 	.probe = advk_pcie_probe,
+	.remove = advk_pcie_remove,
 };
-builtin_platform_driver(advk_pcie_driver);
+module_platform_driver(advk_pcie_driver);
+
+MODULE_DESCRIPTION("Aardvark PCIe controller");
+MODULE_LICENSE("GPL v2");