From f70575805708cabdedea7498aaa3f710fde4d920 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 31 Jan 2024 03:29:01 +0000
Subject: [PATCH] add lvds1024*800

---
 kernel/drivers/ntb/hw/amd/ntb_hw_amd.c | 334 +++++++++++++++++++++++++++++++++++++++++++++----------
 1 files changed, 272 insertions(+), 62 deletions(-)

diff --git a/kernel/drivers/ntb/hw/amd/ntb_hw_amd.c b/kernel/drivers/ntb/hw/amd/ntb_hw_amd.c
index 0b1fbb5..ac401ad 100644
--- a/kernel/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/kernel/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -78,7 +78,7 @@
 	if (idx < 0 || idx > ndev->mw_count)
 		return -EINVAL;
 
-	return 1 << idx;
+	return ndev->dev_data->mw_idx << idx;
 }
 
 static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx)
@@ -160,8 +160,8 @@
 		}
 
 		/* set and verify setting the limit */
-		write64(limit, mmio + limit_reg);
-		reg_val = read64(mmio + limit_reg);
+		write64(limit, peer_mmio + limit_reg);
+		reg_val = read64(peer_mmio + limit_reg);
 		if (reg_val != limit) {
 			write64(base_addr, mmio + limit_reg);
 			write64(0, peer_mmio + xlat_reg);
@@ -183,8 +183,8 @@
 		}
 
 		/* set and verify setting the limit */
-		writel(limit, mmio + limit_reg);
-		reg_val = readl(mmio + limit_reg);
+		writel(limit, peer_mmio + limit_reg);
+		reg_val = readl(peer_mmio + limit_reg);
 		if (reg_val != limit) {
 			writel(base_addr, mmio + limit_reg);
 			writel(0, peer_mmio + xlat_reg);
@@ -195,26 +195,117 @@
 	return 0;
 }
 
-static int amd_link_is_up(struct amd_ntb_dev *ndev)
+static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev)
 {
-	if (!ndev->peer_sta)
-		return NTB_LNK_STA_ACTIVE(ndev->cntl_sta);
+	struct pci_dev *pdev = NULL;
+	struct pci_dev *pci_swds = NULL;
+	struct pci_dev *pci_swus = NULL;
+	u32 stat;
+	int rc;
 
-	if (ndev->peer_sta & AMD_LINK_UP_EVENT) {
-		ndev->peer_sta = 0;
-		return 1;
+	if (ndev->ntb.topo == NTB_TOPO_SEC) {
+		/* Locate the pointer to Downstream Switch for this device */
+		pci_swds = pci_upstream_bridge(ndev->ntb.pdev);
+		if (pci_swds) {
+			/*
+			 * Locate the pointer to Upstream Switch for
+			 * the Downstream Switch.
+			 */
+			pci_swus = pci_upstream_bridge(pci_swds);
+			if (pci_swus) {
+				rc = pcie_capability_read_dword(pci_swus,
+								PCI_EXP_LNKCTL,
+								&stat);
+				if (rc)
+					return 0;
+			} else {
+				return 0;
+			}
+		} else {
+			return 0;
+		}
+	} else if (ndev->ntb.topo == NTB_TOPO_PRI) {
+		/*
+		 * For NTB primary, we simply read the Link Status and control
+		 * register of the NTB device itself.
+		 */
+		pdev = ndev->ntb.pdev;
+		rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat);
+		if (rc)
+			return 0;
+	} else {
+		/* Catch all for everything else */
+		return 0;
 	}
 
-	/* If peer_sta is reset or D0 event, the ISR has
-	 * started a timer to check link status of hardware.
-	 * So here just clear status bit. And if peer_sta is
-	 * D3 or PME_TO, D0/reset event will be happened when
-	 * system wakeup/poweron, so do nothing here.
+	ndev->lnk_sta = stat;
+
+	return 1;
+}
+
+static int amd_link_is_up(struct amd_ntb_dev *ndev)
+{
+	int ret;
+
+	/*
+	 * We consider the link to be up under two conditions:
+	 *
+	 *   - When a link-up event is received. This is indicated by
+	 *     AMD_LINK_UP_EVENT set in peer_sta.
+	 *   - When driver on both sides of the link have been loaded.
+	 *     This is indicated by bit 1 being set in the peer
+	 *     SIDEINFO register.
+	 *
+	 * This function should return 1 when the latter of the above
+	 * two conditions is true.
+	 *
+	 * Now consider the sequence of events - Link-Up event occurs,
+	 * then the peer side driver loads. In this case, we would have
+	 * received LINK_UP event and bit 1 of peer SIDEINFO is also
	 * set. What happens now if the link goes down? Bit 1 of
+	 * peer SIDEINFO remains set, but LINK_DOWN bit is set in
+	 * peer_sta. So we should return 0 from this function. Not only
+	 * that, we clear bit 1 of peer SIDEINFO to 0, since the peer
+	 * side driver did not even get a chance to clear it before
+	 * the link went down. This can be the case of surprise link
+	 * removal.
+	 *
+	 * LINK_UP event will always occur before the peer side driver
+	 * gets loaded the very first time. So there can be a case when
+	 * the LINK_UP event has occurred, but the peer side driver hasn't
+	 * yet loaded. We return 0 in that case.
+	 *
+	 * There is also a special case when the primary side driver is
+	 * unloaded and then loaded again. Since there is no change in
+	 * the status of NTB secondary in this case, there is no Link-Up
+	 * or Link-Down notification received. We recognize this condition
+	 * with peer_sta being set to 0.
+	 *
+	 * If bit 1 of peer SIDEINFO register is not set, then we
+	 * simply return 0 irrespective of the link up or down status
+	 * set in peer_sta.
 	 */
-	if (ndev->peer_sta & AMD_PEER_RESET_EVENT)
-		ndev->peer_sta &= ~AMD_PEER_RESET_EVENT;
-	else if (ndev->peer_sta & (AMD_PEER_D0_EVENT | AMD_LINK_DOWN_EVENT))
-		ndev->peer_sta = 0;
+	ret = amd_poll_link(ndev);
+	if (ret) {
+		/*
+		 * We need to check the below only for NTB primary. For NTB
+		 * secondary, simply checking the result of PSIDE_INFO
+		 * register will suffice.
+		 */
+		if (ndev->ntb.topo == NTB_TOPO_PRI) {
+			if ((ndev->peer_sta & AMD_LINK_UP_EVENT) ||
+			    (ndev->peer_sta == 0))
+				return ret;
+			else if (ndev->peer_sta & AMD_LINK_DOWN_EVENT) {
+				/* Clear peer sideinfo register */
+				amd_clear_side_info_reg(ndev, true);
+
+				return 0;
+			}
+		} else { /* NTB_TOPO_SEC */
+			return ret;
+		}
+	}
 
 	return 0;
 }
@@ -253,7 +344,6 @@
 {
 	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
 	void __iomem *mmio = ndev->self_mmio;
-	u32 ntb_ctl;
 
 	/* Enable event interrupt */
 	ndev->int_mask &= ~AMD_EVENT_INTMASK;
@@ -263,10 +353,6 @@
 		return -EINVAL;
 	dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
 
-	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
-	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
-	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
-
 	return 0;
 }
 
@@ -274,7 +360,6 @@
 {
 	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
 	void __iomem *mmio = ndev->self_mmio;
-	u32 ntb_ctl;
 
 	/* Disable event interrupt */
 	ndev->int_mask |= AMD_EVENT_INTMASK;
@@ -283,10 +368,6 @@
 	if (ndev->ntb.topo == NTB_TOPO_SEC)
 		return -EINVAL;
 	dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
-
-	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
-	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
-	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
 
 	return 0;
 }
@@ -333,7 +414,7 @@
 	if (db_vector < 0 || db_vector > ndev->db_count)
 		return 0;
 
-	return ntb_ndev(ntb)->db_valid_mask & (1 << db_vector);
+	return ntb_ndev(ntb)->db_valid_mask & (1ULL << db_vector);
 }
 
 static u64 amd_ntb_db_read(struct ntb_dev *ntb)
@@ -493,8 +574,6 @@
 	reg = readl(mmio + AMD_SMUACK_OFFSET);
 	reg |= bit;
 	writel(reg, mmio + AMD_SMUACK_OFFSET);
-
-	ndev->peer_sta |= bit;
 }
 
 static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
@@ -512,10 +591,16 @@
 	status &= AMD_EVENT_INTMASK;
 	switch (status) {
 	case AMD_PEER_FLUSH_EVENT:
+		ndev->peer_sta |= AMD_PEER_FLUSH_EVENT;
 		dev_info(dev, "Flush is done.\n");
 		break;
 	case AMD_PEER_RESET_EVENT:
-		amd_ack_smu(ndev, AMD_PEER_RESET_EVENT);
+	case AMD_LINK_DOWN_EVENT:
+		ndev->peer_sta |= status;
+		if (status == AMD_LINK_DOWN_EVENT)
+			ndev->peer_sta &= ~AMD_LINK_UP_EVENT;
+
+		amd_ack_smu(ndev, status);
 
 		/* link down first */
 		ntb_link_event(&ndev->ntb);
@@ -526,7 +611,12 @@
 	case AMD_PEER_D3_EVENT:
 	case AMD_PEER_PMETO_EVENT:
 	case AMD_LINK_UP_EVENT:
-	case AMD_LINK_DOWN_EVENT:
+		ndev->peer_sta |= status;
+		if (status == AMD_LINK_UP_EVENT)
+			ndev->peer_sta &= ~AMD_LINK_DOWN_EVENT;
+		else if (status == AMD_PEER_D3_EVENT)
+			ndev->peer_sta &= ~AMD_PEER_D0_EVENT;
+
 		amd_ack_smu(ndev, status);
 
 		/* link down */
@@ -540,6 +630,8 @@
 		if (status & 0x1)
 			dev_info(dev, "Wakeup is done.\n");
 
+		ndev->peer_sta |= AMD_PEER_D0_EVENT;
+		ndev->peer_sta &= ~AMD_PEER_D3_EVENT;
 		amd_ack_smu(ndev, AMD_PEER_D0_EVENT);
 
 		/* start a timer to poll link status */
@@ -550,6 +642,39 @@
 		dev_info(dev, "event status = 0x%x.\n", status);
 		break;
 	}
+
+	/* Clear the interrupt status */
+	writel(status, mmio + AMD_INTSTAT_OFFSET);
+}
+
+static void amd_handle_db_event(struct amd_ntb_dev *ndev, int vec)
+{
+	struct device *dev = &ndev->ntb.pdev->dev;
+	u64 status;
+
+	status = amd_ntb_db_read(&ndev->ntb);
+
+	dev_dbg(dev, "status = 0x%llx and vec = %d\n", status, vec);
+
+	/*
+	 * Since we had reserved highest order bit of DB for signaling peer of
+	 * a special event, this is the only status bit we should be concerned
+	 * here now.
+	 */
+	if (status & BIT(ndev->db_last_bit)) {
+		ntb_db_clear(&ndev->ntb, BIT(ndev->db_last_bit));
+		/* send link down event notification */
+		ntb_link_event(&ndev->ntb);
+
+		/*
+		 * If we are here, that means the peer has signalled a special
+		 * event which notifies that the peer driver has been
+		 * un-loaded for some reason. Since there is a chance that the
+		 * peer will load its driver again sometime, we schedule link
+		 * polling routine.
+		 */
+		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
+	}
 }
 
 static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
@@ -559,8 +684,10 @@
 	if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
 		amd_handle_event(ndev, vec);
 
-	if (vec < AMD_DB_CNT)
+	if (vec < AMD_DB_CNT) {
+		amd_handle_db_event(ndev, vec);
 		ntb_db_event(&ndev->ntb, vec);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -842,26 +969,18 @@
 static int amd_poll_link(struct amd_ntb_dev *ndev)
 {
 	void __iomem *mmio = ndev->peer_mmio;
-	u32 reg, stat;
-	int rc;
+	u32 reg;
 
 	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
-	reg &= NTB_LIN_STA_ACTIVE_BIT;
+	reg &= AMD_SIDE_READY;
 
 	dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);
 
-	if (reg == ndev->cntl_sta)
-		return 0;
-
 	ndev->cntl_sta = reg;
 
-	rc = pci_read_config_dword(ndev->ntb.pdev,
-				   AMD_LINK_STATUS_OFFSET, &stat);
-	if (rc)
-		return 0;
-	ndev->lnk_sta = stat;
+	amd_ntb_get_link_status(ndev);
 
-	return 1;
+	return ndev->cntl_sta;
 }
 
 static void amd_link_hb(struct work_struct *work)
@@ -880,10 +999,15 @@
 	return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT);
 }
 
-static void amd_init_side_info(struct amd_ntb_dev *ndev)
+static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
 {
-	void __iomem *mmio = ndev->self_mmio;
+	void __iomem *mmio = NULL;
 	unsigned int reg;
+
+	if (peer)
+		mmio = ndev->peer_mmio;
+	else
+		mmio = ndev->self_mmio;
 
 	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
 	if (!(reg & AMD_SIDE_READY)) {
@@ -892,10 +1016,15 @@
 	}
 }
 
-static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
+static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
 {
-	void __iomem *mmio = ndev->self_mmio;
+	void __iomem *mmio = NULL;
 	unsigned int reg;
+
+	if (peer)
+		mmio = ndev->peer_mmio;
+	else
+		mmio = ndev->self_mmio;
 
 	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
 	if (reg & AMD_SIDE_READY) {
@@ -905,11 +1034,35 @@
 	}
 }
 
+static void amd_init_side_info(struct amd_ntb_dev *ndev)
+{
+	void __iomem *mmio = ndev->self_mmio;
+	u32 ntb_ctl;
+
+	amd_set_side_info_reg(ndev, false);
+
+	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
+	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
+	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
+}
+
+static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
+{
+	void __iomem *mmio = ndev->self_mmio;
+	u32 ntb_ctl;
+
+	amd_clear_side_info_reg(ndev, false);
+
+	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
+	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
+	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
+}
+
 static int amd_init_ntb(struct amd_ntb_dev *ndev)
 {
 	void __iomem *mmio = ndev->self_mmio;
 
-	ndev->mw_count = AMD_MW_CNT;
+	ndev->mw_count = ndev->dev_data->mw_count;
 	ndev->spad_count = AMD_SPADS_CNT;
 	ndev->db_count = AMD_DB_CNT;
 
@@ -935,8 +1088,6 @@
 		return -EINVAL;
 	}
 
-	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
-
 	/* Mask event interrupts */
 	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
 
@@ -957,6 +1108,7 @@
 
 static int amd_init_dev(struct amd_ntb_dev *ndev)
 {
+	void __iomem *mmio = ndev->self_mmio;
 	struct pci_dev *pdev;
 	int rc = 0;
 
@@ -977,6 +1129,25 @@
 	}
 
 	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+	/*
+	 * We reserve the highest order bit of the DB register which will
+	 * be used to notify peer when the driver on this side is being
+	 * un-loaded.
+	 */
+	ndev->db_last_bit =
+			find_last_bit((unsigned long *)&ndev->db_valid_mask,
+				      hweight64(ndev->db_valid_mask));
+	writew((u16)~BIT(ndev->db_last_bit), mmio + AMD_DBMASK_OFFSET);
+	/*
+	 * Since now there is one less bit to account for, the DB count
+	 * and DB mask should be adjusted accordingly.
+	 */
+	ndev->db_count -= 1;
+	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+	/* Enable Link-Up and Link-Down event interrupts */
+	ndev->int_mask &= ~(AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT);
+	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
 
 	return 0;
 }
@@ -1020,10 +1191,6 @@
 			goto err_dma_mask;
 		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
 	}
-	rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
-					  dma_get_mask(&pdev->dev));
-	if (rc)
-		goto err_dma_mask;
 
 	ndev->self_mmio = pci_iomap(pdev, 0, 0);
 	if (!ndev->self_mmio) {
@@ -1070,6 +1237,8 @@
 		goto err_ndev;
 	}
 
+	ndev->dev_data = (struct ntb_dev_data *)id->driver_data;
+
 	ndev_init_struct(ndev, pdev);
 
 	rc = amd_ntb_init_pci(ndev, pdev);
@@ -1110,9 +1279,31 @@
 {
 	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);
 
+	/*
+	 * Clear the READY bit in SIDEINFO register before sending DB event
+	 * to the peer. This will make sure that when the peer handles the
+	 * DB event, it correctly reads this bit as being 0.
+	 */
+	amd_deinit_side_info(ndev);
+	ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
 	ntb_unregister_device(&ndev->ntb);
 	ndev_deinit_debugfs(ndev);
+	amd_deinit_dev(ndev);
+	amd_ntb_deinit_pci(ndev);
+	kfree(ndev);
+}
+
+static void amd_ntb_pci_shutdown(struct pci_dev *pdev)
+{
+	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+	/* Send link down notification */
+	ntb_link_event(&ndev->ntb);
+	amd_deinit_side_info(ndev);
+	ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
+	ntb_unregister_device(&ndev->ntb);
+	ndev_deinit_debugfs(ndev);
 	amd_deinit_dev(ndev);
 	amd_ntb_deinit_pci(ndev);
 	kfree(ndev);
 }
@@ -1124,9 +1315,22 @@
 	.read = ndev_debugfs_read,
 };
 
+static const struct ntb_dev_data dev_data[] = {
+	{ /* for device 145b */
+		.mw_count = 3,
+		.mw_idx = 1,
+	},
+	{ /* for device 148b */
+		.mw_count = 2,
+		.mw_idx = 2,
+	},
+};
+
 static const struct pci_device_id amd_ntb_pci_tbl[] = {
-	{PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NTB)},
-	{0}
+	{ PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] },
+	{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
+	{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
+	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);
 
@@ -1135,16 +1339,22 @@
 	.id_table	= amd_ntb_pci_tbl,
 	.probe		= amd_ntb_pci_probe,
 	.remove		= amd_ntb_pci_remove,
+	.shutdown	= amd_ntb_pci_shutdown,
 };
 
 static int __init amd_ntb_pci_driver_init(void)
 {
+	int ret;
 	pr_info("%s %s\n", NTB_DESC, NTB_VER);
 
 	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
 
-	return pci_register_driver(&amd_ntb_pci_driver);
+	ret = pci_register_driver(&amd_ntb_pci_driver);
+	if (ret)
+		debugfs_remove_recursive(debugfs_dir);
+
+	return ret;
 }
 module_init(amd_ntb_pci_driver_init);
 
-- 
Gitblit v1.6.2