@@ -78,7 +78,7 @@
         if (idx < 0 || idx > ndev->mw_count)
                 return -EINVAL;

-        return 1 << idx;
+        return ndev->dev_data->mw_idx << idx;
 }

 static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx)
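Aside (not part of the patch): a minimal user-space sketch of what the reworked `ndev_mw_to_bar()` computes, assuming the `mw_idx` values from the `dev_data[]` table added near the end of this diff (mw_idx = 1 for device 0x145b, mw_idx = 2 for 0x148b).

```c
/* Hypothetical illustration only; mw_to_bar() mirrors the patched expression. */
#include <stdio.h>

static int mw_to_bar(int mw_idx, int idx)
{
        return mw_idx << idx;   /* was "1 << idx" before the patch */
}

int main(void)
{
        for (int idx = 0; idx < 3; idx++)
                printf("mw %d -> BAR %d (145b) / BAR %d (148b)\n",
                       idx, mw_to_bar(1, idx), mw_to_bar(2, idx));
        /* Prints BARs 1, 2, 4 for 145b and 2, 4, 8 for 148b; the 148b
         * entry only advertises mw_count = 2, so its idx stops at 1. */
        return 0;
}
```

So the old `1 << idx` behaviour is simply the mw_idx = 1 case, and the per-device starting BAR now comes from the new dev_data instead of being hard-coded.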
@@ -160,8 +160,8 @@
                 }

                 /* set and verify setting the limit */
-                write64(limit, mmio + limit_reg);
-                reg_val = read64(mmio + limit_reg);
+                write64(limit, peer_mmio + limit_reg);
+                reg_val = read64(peer_mmio + limit_reg);
                 if (reg_val != limit) {
                         write64(base_addr, mmio + limit_reg);
                         write64(0, peer_mmio + xlat_reg);
@@ -183,8 +183,8 @@
                 }

                 /* set and verify setting the limit */
-                writel(limit, mmio + limit_reg);
-                reg_val = readl(mmio + limit_reg);
+                writel(limit, peer_mmio + limit_reg);
+                reg_val = readl(peer_mmio + limit_reg);
                 if (reg_val != limit) {
                         writel(base_addr, mmio + limit_reg);
                         writel(0, peer_mmio + xlat_reg);
@@ -195,26 +195,117 @@
         return 0;
 }

-static int amd_link_is_up(struct amd_ntb_dev *ndev)
+static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev)
 {
-        if (!ndev->peer_sta)
-                return NTB_LNK_STA_ACTIVE(ndev->cntl_sta);
+        struct pci_dev *pdev = NULL;
+        struct pci_dev *pci_swds = NULL;
+        struct pci_dev *pci_swus = NULL;
+        u32 stat;
+        int rc;

-        if (ndev->peer_sta & AMD_LINK_UP_EVENT) {
-                ndev->peer_sta = 0;
-                return 1;
+        if (ndev->ntb.topo == NTB_TOPO_SEC) {
+                /* Locate the pointer to Downstream Switch for this device */
+                pci_swds = pci_upstream_bridge(ndev->ntb.pdev);
+                if (pci_swds) {
+                        /*
+                         * Locate the pointer to Upstream Switch for
+                         * the Downstream Switch.
+                         */
+                        pci_swus = pci_upstream_bridge(pci_swds);
+                        if (pci_swus) {
+                                rc = pcie_capability_read_dword(pci_swus,
+                                                                PCI_EXP_LNKCTL,
+                                                                &stat);
+                                if (rc)
+                                        return 0;
+                        } else {
+                                return 0;
+                        }
+                } else {
+                        return 0;
+                }
+        } else if (ndev->ntb.topo == NTB_TOPO_PRI) {
+                /*
+                 * For NTB primary, we simply read the Link Status and control
+                 * register of the NTB device itself.
+                 */
+                pdev = ndev->ntb.pdev;
+                rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat);
+                if (rc)
+                        return 0;
+        } else {
+                /* Catch all for everything else */
+                return 0;
         }

-        /* If peer_sta is reset or D0 event, the ISR has
-         * started a timer to check link status of hardware.
-         * So here just clear status bit. And if peer_sta is
-         * D3 or PME_TO, D0/reset event will be happened when
-         * system wakeup/poweron, so do nothing here.
+        ndev->lnk_sta = stat;
+
+        return 1;
+}
+
+static int amd_link_is_up(struct amd_ntb_dev *ndev)
+{
+        int ret;
+
+        /*
+         * We consider the link to be up under two conditions:
+         *
+         * - When a link-up event is received. This is indicated by
+         *   AMD_LINK_UP_EVENT set in peer_sta.
+         * - When driver on both sides of the link have been loaded.
+         *   This is indicated by bit 1 being set in the peer
+         *   SIDEINFO register.
+         *
+         * This function should return 1 when the latter of the above
+         * two conditions is true.
+         *
+         * Now consider the sequence of events - Link-Up event occurs,
+         * then the peer side driver loads. In this case, we would have
+         * received LINK_UP event and bit 1 of peer SIDEINFO is also
+         * set. What happens now if the link goes down? Bit 1 of
+         * peer SIDEINFO remains set, but LINK_DOWN bit is set in
+         * peer_sta. So we should return 0 from this function. Not only
+         * that, we clear bit 1 of peer SIDEINFO to 0, since the peer
+         * side driver did not even get a chance to clear it before
+         * the link went down. This can be the case of surprise link
+         * removal.
+         *
+         * LINK_UP event will always occur before the peer side driver
+         * gets loaded the very first time. So there can be a case when
+         * the LINK_UP event has occurred, but the peer side driver hasn't
+         * yet loaded. We return 0 in that case.
+         *
+         * There is also a special case when the primary side driver is
+         * unloaded and then loaded again. Since there is no change in
+         * the status of NTB secondary in this case, there is no Link-Up
+         * or Link-Down notification received. We recognize this condition
+         * with peer_sta being set to 0.
+         *
+         * If bit 1 of peer SIDEINFO register is not set, then we
+         * simply return 0 irrespective of the link up or down status
+         * set in peer_sta.
          */
-        if (ndev->peer_sta & AMD_PEER_RESET_EVENT)
-                ndev->peer_sta &= ~AMD_PEER_RESET_EVENT;
-        else if (ndev->peer_sta & (AMD_PEER_D0_EVENT | AMD_LINK_DOWN_EVENT))
-                ndev->peer_sta = 0;
+        ret = amd_poll_link(ndev);
+        if (ret) {
+                /*
+                 * We need to check the below only for NTB primary. For NTB
+                 * secondary, simply checking the result of PSIDE_INFO
+                 * register will suffice.
+                 */
+                if (ndev->ntb.topo == NTB_TOPO_PRI) {
+                        if ((ndev->peer_sta & AMD_LINK_UP_EVENT) ||
+                            (ndev->peer_sta == 0))
+                                return ret;
+                        else if (ndev->peer_sta & AMD_LINK_DOWN_EVENT) {
+                                /* Clear peer sideinfo register */
+                                amd_clear_side_info_reg(ndev, true);
+
+                                return 0;
+                        }
+                } else { /* NTB_TOPO_SEC */
+                        return ret;
+                }
+        }

         return 0;
 }
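Aside (not part of the patch): the long comment above boils down to a small decision rule. A rough restatement as a pure function follows; the event-bit values are placeholders rather than the driver's real AMD_*_EVENT definitions, and the SIDEINFO-clearing side effect on surprise link removal is not modelled.

```c
#include <stdbool.h>

#define LINK_UP_EVENT   (1u << 1)       /* placeholder bit values */
#define LINK_DOWN_EVENT (1u << 2)

enum topo { TOPO_PRI, TOPO_SEC };

/* side_ready: the AMD_SIDE_READY bit read from the peer SIDEINFO register
 * by amd_poll_link(); peer_sta: event bits accumulated by amd_handle_event().
 */
bool link_is_up(enum topo topo, bool side_ready, unsigned int peer_sta)
{
        if (!side_ready)
                return false;           /* peer driver not (yet) loaded */

        if (topo == TOPO_SEC)
                return true;            /* SIDEINFO alone decides for the secondary */

        /* Primary: the most recent event must not be a link-down or other
         * power event; peer_sta == 0 covers the reload-without-events case.
         */
        return peer_sta == 0 || (peer_sta & LINK_UP_EVENT);
}
```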
@@ -253,7 +344,6 @@
 {
         struct amd_ntb_dev *ndev = ntb_ndev(ntb);
         void __iomem *mmio = ndev->self_mmio;
-        u32 ntb_ctl;

         /* Enable event interrupt */
         ndev->int_mask &= ~AMD_EVENT_INTMASK;
@@ -263,9 +353,5 @@
                 return -EINVAL;
         dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");

-        ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
-        ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
-        writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
-
         return 0;
 }
@@ -274,7 +360,6 @@
 {
         struct amd_ntb_dev *ndev = ntb_ndev(ntb);
         void __iomem *mmio = ndev->self_mmio;
-        u32 ntb_ctl;

         /* Disable event interrupt */
         ndev->int_mask |= AMD_EVENT_INTMASK;
@@ -283,10 +368,6 @@
         if (ndev->ntb.topo == NTB_TOPO_SEC)
                 return -EINVAL;
         dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
-
-        ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
-        ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
-        writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);

         return 0;
 }
@@ -333,7 +414,7 @@
         if (db_vector < 0 || db_vector > ndev->db_count)
                 return 0;

-        return ntb_ndev(ntb)->db_valid_mask & (1 << db_vector);
+        return ntb_ndev(ntb)->db_valid_mask & (1ULL << db_vector);
 }

 static u64 amd_ntb_db_read(struct ntb_dev *ntb)
@@ -493,8 +574,6 @@
         reg = readl(mmio + AMD_SMUACK_OFFSET);
         reg |= bit;
         writel(reg, mmio + AMD_SMUACK_OFFSET);
-
-        ndev->peer_sta |= bit;
 }

 static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
@@ -512,10 +591,16 @@
         status &= AMD_EVENT_INTMASK;
         switch (status) {
         case AMD_PEER_FLUSH_EVENT:
+                ndev->peer_sta |= AMD_PEER_FLUSH_EVENT;
                 dev_info(dev, "Flush is done.\n");
                 break;
         case AMD_PEER_RESET_EVENT:
-                amd_ack_smu(ndev, AMD_PEER_RESET_EVENT);
+        case AMD_LINK_DOWN_EVENT:
+                ndev->peer_sta |= status;
+                if (status == AMD_LINK_DOWN_EVENT)
+                        ndev->peer_sta &= ~AMD_LINK_UP_EVENT;
+
+                amd_ack_smu(ndev, status);

                 /* link down first */
                 ntb_link_event(&ndev->ntb);
@@ -526,7 +611,12 @@
         case AMD_PEER_D3_EVENT:
         case AMD_PEER_PMETO_EVENT:
         case AMD_LINK_UP_EVENT:
-        case AMD_LINK_DOWN_EVENT:
+                ndev->peer_sta |= status;
+                if (status == AMD_LINK_UP_EVENT)
+                        ndev->peer_sta &= ~AMD_LINK_DOWN_EVENT;
+                else if (status == AMD_PEER_D3_EVENT)
+                        ndev->peer_sta &= ~AMD_PEER_D0_EVENT;
+
                 amd_ack_smu(ndev, status);

                 /* link down */
@@ -540,6 +630,8 @@
                 if (status & 0x1)
                         dev_info(dev, "Wakeup is done.\n");

+                ndev->peer_sta |= AMD_PEER_D0_EVENT;
+                ndev->peer_sta &= ~AMD_PEER_D3_EVENT;
                 amd_ack_smu(ndev, AMD_PEER_D0_EVENT);

                 /* start a timer to poll link status */
@@ -550,6 +642,39 @@
                 dev_info(dev, "event status = 0x%x.\n", status);
                 break;
         }
+
+        /* Clear the interrupt status */
+        writel(status, mmio + AMD_INTSTAT_OFFSET);
+}
+
+static void amd_handle_db_event(struct amd_ntb_dev *ndev, int vec)
+{
+        struct device *dev = &ndev->ntb.pdev->dev;
+        u64 status;
+
+        status = amd_ntb_db_read(&ndev->ntb);
+
+        dev_dbg(dev, "status = 0x%llx and vec = %d\n", status, vec);
+
+        /*
+         * Since we had reserved highest order bit of DB for signaling peer of
+         * a special event, this is the only status bit we should be concerned
+         * here now.
+         */
+        if (status & BIT(ndev->db_last_bit)) {
+                ntb_db_clear(&ndev->ntb, BIT(ndev->db_last_bit));
+                /* send link down event notification */
+                ntb_link_event(&ndev->ntb);
+
+                /*
+                 * If we are here, that means the peer has signalled a special
+                 * event which notifies that the peer driver has been
+                 * un-loaded for some reason. Since there is a chance that the
+                 * peer will load its driver again sometime, we schedule link
+                 * polling routine.
+                 */
+                schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
+        }
 }

 static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
@@ -559,8 +684,10 @@
         if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
                 amd_handle_event(ndev, vec);

-        if (vec < AMD_DB_CNT)
+        if (vec < AMD_DB_CNT) {
+                amd_handle_db_event(ndev, vec);
                 ntb_db_event(&ndev->ntb, vec);
+        }

         return IRQ_HANDLED;
 }
@@ -842,26 +969,18 @@
 static int amd_poll_link(struct amd_ntb_dev *ndev)
 {
         void __iomem *mmio = ndev->peer_mmio;
-        u32 reg, stat;
-        int rc;
+        u32 reg;

         reg = readl(mmio + AMD_SIDEINFO_OFFSET);
-        reg &= NTB_LIN_STA_ACTIVE_BIT;
+        reg &= AMD_SIDE_READY;

         dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);

-        if (reg == ndev->cntl_sta)
-                return 0;
-
         ndev->cntl_sta = reg;

-        rc = pci_read_config_dword(ndev->ntb.pdev,
-                                   AMD_LINK_STATUS_OFFSET, &stat);
-        if (rc)
-                return 0;
-        ndev->lnk_sta = stat;
+        amd_ntb_get_link_status(ndev);

-        return 1;
+        return ndev->cntl_sta;
 }

 static void amd_link_hb(struct work_struct *work)
@@ -880,10 +999,15 @@
         return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT);
 }

-static void amd_init_side_info(struct amd_ntb_dev *ndev)
+static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
 {
-        void __iomem *mmio = ndev->self_mmio;
+        void __iomem *mmio = NULL;
         unsigned int reg;
+
+        if (peer)
+                mmio = ndev->peer_mmio;
+        else
+                mmio = ndev->self_mmio;

         reg = readl(mmio + AMD_SIDEINFO_OFFSET);
         if (!(reg & AMD_SIDE_READY)) {
@@ -892,10 +1016,15 @@
         }
 }

-static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
+static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
 {
-        void __iomem *mmio = ndev->self_mmio;
+        void __iomem *mmio = NULL;
         unsigned int reg;
+
+        if (peer)
+                mmio = ndev->peer_mmio;
+        else
+                mmio = ndev->self_mmio;

         reg = readl(mmio + AMD_SIDEINFO_OFFSET);
         if (reg & AMD_SIDE_READY) {
@@ -905,11 +1034,35 @@
         }
 }

+static void amd_init_side_info(struct amd_ntb_dev *ndev)
+{
+        void __iomem *mmio = ndev->self_mmio;
+        u32 ntb_ctl;
+
+        amd_set_side_info_reg(ndev, false);
+
+        ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
+        ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
+        writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
+}
+
+static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
+{
+        void __iomem *mmio = ndev->self_mmio;
+        u32 ntb_ctl;
+
+        amd_clear_side_info_reg(ndev, false);
+
+        ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
+        ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
+        writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
+}
+
 static int amd_init_ntb(struct amd_ntb_dev *ndev)
 {
         void __iomem *mmio = ndev->self_mmio;

-        ndev->mw_count = AMD_MW_CNT;
+        ndev->mw_count = ndev->dev_data->mw_count;
         ndev->spad_count = AMD_SPADS_CNT;
         ndev->db_count = AMD_DB_CNT;

@@ -935,8 +1088,6 @@
                 return -EINVAL;
         }

-        ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
-
         /* Mask event interrupts */
         writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

@@ -957,6 +1108,7 @@

 static int amd_init_dev(struct amd_ntb_dev *ndev)
 {
+        void __iomem *mmio = ndev->self_mmio;
         struct pci_dev *pdev;
         int rc = 0;

@@ -977,6 +1129,25 @@
         }

         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+        /*
+         * We reserve the highest order bit of the DB register which will
+         * be used to notify peer when the driver on this side is being
+         * un-loaded.
+         */
+        ndev->db_last_bit =
+                        find_last_bit((unsigned long *)&ndev->db_valid_mask,
+                                      hweight64(ndev->db_valid_mask));
+        writew((u16)~BIT(ndev->db_last_bit), mmio + AMD_DBMASK_OFFSET);
+        /*
+         * Since now there is one less bit to account for, the DB count
+         * and DB mask should be adjusted accordingly.
+         */
+        ndev->db_count -= 1;
+        ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+        /* Enable Link-Up and Link-Down event interrupts */
+        ndev->int_mask &= ~(AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT);
+        writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

         return 0;
 }
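Aside (not part of the patch): the doorbell reservation above, worked through numerically under the assumption that AMD_DB_CNT is 16, the count the existing driver uses.

```c
#include <stdio.h>

int main(void)
{
        unsigned int db_count = 16;                          /* assumed AMD_DB_CNT */
        unsigned long long db_valid_mask = (1ULL << db_count) - 1;      /* 0xffff */
        unsigned int db_last_bit = db_count - 1;             /* find_last_bit() -> 15 */

        /* value written to AMD_DBMASK_OFFSET: every bit except the reserved one */
        unsigned short db_mask = (unsigned short)~(1u << db_last_bit);  /* 0x7fff */

        db_count -= 1;                                       /* 15 doorbells left */
        db_valid_mask = (1ULL << db_count) - 1;              /* 0x7fff */

        printf("reserved bit %u, DBMASK write 0x%04x, valid mask 0x%llx\n",
               db_last_bit, db_mask, db_valid_mask);
        return 0;
}
```

The reserved bit is the one the new remove/shutdown paths ring on the peer via ntb_peer_db_set() later in this diff, so the peer's amd_handle_db_event() can react to the driver going away.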
@@ -1020,10 +1191,6 @@
                         goto err_dma_mask;
                 dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
         }
-        rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
-                                          dma_get_mask(&pdev->dev));
-        if (rc)
-                goto err_dma_mask;

         ndev->self_mmio = pci_iomap(pdev, 0, 0);
         if (!ndev->self_mmio) {
@@ -1070,6 +1237,8 @@
                 goto err_ndev;
         }

+        ndev->dev_data = (struct ntb_dev_data *)id->driver_data;
+
         ndev_init_struct(ndev, pdev);

         rc = amd_ntb_init_pci(ndev, pdev);
@@ -1110,9 +1279,31 @@
 {
         struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);

+        /*
+         * Clear the READY bit in SIDEINFO register before sending DB event
+         * to the peer. This will make sure that when the peer handles the
+         * DB event, it correctly reads this bit as being 0.
+         */
+        amd_deinit_side_info(ndev);
+        ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
         ntb_unregister_device(&ndev->ntb);
         ndev_deinit_debugfs(ndev);
+        amd_deinit_dev(ndev);
+        amd_ntb_deinit_pci(ndev);
+        kfree(ndev);
+}
+
+static void amd_ntb_pci_shutdown(struct pci_dev *pdev)
+{
+        struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+        /* Send link down notification */
+        ntb_link_event(&ndev->ntb);
+
         amd_deinit_side_info(ndev);
+        ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
+        ntb_unregister_device(&ndev->ntb);
+        ndev_deinit_debugfs(ndev);
         amd_deinit_dev(ndev);
         amd_ntb_deinit_pci(ndev);
         kfree(ndev);
@@ -1124,9 +1315,22 @@
         .read = ndev_debugfs_read,
 };

+static const struct ntb_dev_data dev_data[] = {
+        { /* for device 145b */
+                .mw_count = 3,
+                .mw_idx = 1,
+        },
+        { /* for device 148b */
+                .mw_count = 2,
+                .mw_idx = 2,
+        },
+};
+
 static const struct pci_device_id amd_ntb_pci_tbl[] = {
-        {PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NTB)},
-        {0}
+        { PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] },
+        { PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
+        { PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
+        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);

@@ -1135,16 +1339,22 @@
         .id_table = amd_ntb_pci_tbl,
         .probe = amd_ntb_pci_probe,
         .remove = amd_ntb_pci_remove,
+        .shutdown = amd_ntb_pci_shutdown,
 };

 static int __init amd_ntb_pci_driver_init(void)
 {
+        int ret;
         pr_info("%s %s\n", NTB_DESC, NTB_VER);

         if (debugfs_initialized())
                 debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

-        return pci_register_driver(&amd_ntb_pci_driver);
+        ret = pci_register_driver(&amd_ntb_pci_driver);
+        if (ret)
+                debugfs_remove_recursive(debugfs_dir);
+
+        return ret;
 }
 module_init(amd_ntb_pci_driver_init);
