@@ -1,10 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Thunderbolt Cactus Ridge driver - NHI driver
+ * Thunderbolt driver - NHI driver
  *
  * The NHI (native host interface) is the pci device that allows us to send and
  * receive frames from the thunderbolt bus.
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */
 
 #include <linux/pm_runtime.h>
@@ -14,6 +16,8 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/property.h>
+#include <linux/platform_data/x86/apple.h>
 
 #include "nhi.h"
 #include "nhi_regs.h"
@@ -21,13 +25,7 @@
 
 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
 
-/*
- * Used to enable end-to-end workaround for missing RX packets. Do not
- * use this ring for anything else.
- */
-#define RING_E2E_UNUSED_HOPID	2
-/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
-#define RING_FIRST_USABLE_HOPID	8
+#define RING_FIRST_USABLE_HOPID	1
 
 /*
  * Minimal number of vectors when we use MSI-X. Two for control channel
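
(The allocation floor drops from 8 to 1: with the end-to-end workaround ring gone there is no longer a dedicated TX HopID to reserve, so only HopID 0 stays off limits for ring allocation; the range comment in the allocator below is updated to match.)
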
@@ -38,7 +36,7 @@
 
 #define NHI_MAILBOX_TIMEOUT	500 /* ms */
 
-static int ring_interrupt_index(struct tb_ring *ring)
+static int ring_interrupt_index(const struct tb_ring *ring)
 {
 	int bit = ring->hop;
 	if (!ring->is_tx)
@@ -95,9 +93,9 @@
 	else
 		new = old & ~mask;
 
-	dev_info(&ring->nhi->pdev->dev,
-		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
-		 active ? "enabling" : "disabling", reg, bit, old, new);
+	dev_dbg(&ring->nhi->pdev->dev,
+		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
+		active ? "enabling" : "disabling", reg, bit, old, new);
 
 	if (new == old)
 		dev_WARN(&ring->nhi->pdev->dev,
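
This patch demotes the ring lifecycle chatter from dev_info() to dev_dbg() throughout the file. The messages are then emitted only when dynamic debug is enabled for this file (e.g. echo 'file nhi.c +p' > /sys/kernel/debug/dynamic_debug/control on a CONFIG_DYNAMIC_DEBUG kernel) and are compiled out entirely otherwise.
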
@@ -449,7 +447,7 @@
 
 	/*
 	 * Automatically allocate HopID from the non-reserved
-	 * range 8 .. hop_count - 1.
+	 * range 1 .. hop_count - 1.
 	 */
 	for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
 		if (ring->is_tx) {
@@ -501,12 +499,9 @@
 			 void *poll_data)
 {
 	struct tb_ring *ring = NULL;
-	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
-		 transmit ? "TX" : "RX", hop, size);
 
-	/* Tx Ring 2 is reserved for E2E workaround */
-	if (transmit && hop == RING_E2E_UNUSED_HOPID)
-		return NULL;
+	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
+		transmit ? "TX" : "RX", hop, size);
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
@@ -610,8 +605,8 @@
 		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
 		goto err;
 	}
-	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
-		 RING_TYPE(ring), ring->hop);
+	dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
+		RING_TYPE(ring), ring->hop);
 
 	if (ring->flags & RING_FLAG_FRAME) {
 		/* Means 4096 */
@@ -620,19 +615,6 @@
 	} else {
 		frame_size = TB_FRAME_SIZE;
 		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
-	}
-
-	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
-		u32 hop;
-
-		/*
-		 * In order not to lose Rx packets we enable end-to-end
-		 * workaround which transfers Rx credits to an unused Tx
-		 * HopID.
-		 */
-		hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
-		hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
-		flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
 	}
 
 	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
@@ -672,8 +654,8 @@
 {
 	spin_lock_irq(&ring->nhi->lock);
 	spin_lock(&ring->lock);
-	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
-		 RING_TYPE(ring), ring->hop);
+	dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
+		RING_TYPE(ring), ring->hop);
 	if (ring->nhi->going_away)
 		goto err;
 	if (!ring->running) {
@@ -741,10 +723,8 @@
 	ring->descriptors_dma = 0;
 
 
-	dev_info(&ring->nhi->pdev->dev,
-		 "freeing %s %d\n",
-		 RING_TYPE(ring),
-		 ring->hop);
+	dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
+		ring->hop);
 
 	/**
 	 * ring->work can no longer be scheduled (it is scheduled only
@@ -870,12 +850,68 @@
 	return IRQ_HANDLED;
 }
 
+static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
+
+	ret = tb_domain_suspend_noirq(tb);
+	if (ret)
+		return ret;
+
+	if (nhi->ops && nhi->ops->suspend_noirq) {
+		ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int nhi_suspend_noirq(struct device *dev)
+{
+	return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
+}
+
+static int nhi_freeze_noirq(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
 
-	return tb_domain_suspend_noirq(tb);
+	return tb_domain_freeze_noirq(tb);
+}
+
+static int nhi_thaw_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	return tb_domain_thaw_noirq(tb);
+}
+
+static bool nhi_wake_supported(struct pci_dev *pdev)
+{
+	u8 val;
+
+	/*
+	 * If power rails are sustainable for wakeup from S4 this
+	 * property is set by the BIOS.
+	 */
+	if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
+		return !!val;
+
+	return true;
+}
+
+static int nhi_poweroff_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	bool wakeup;
+
+	wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
+	return __nhi_suspend_noirq(dev, wakeup);
 }
 
 static void nhi_enable_int_throttling(struct tb_nhi *nhi)
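
The nhi->ops hooks introduced above (and used again in the resume and runtime-PM paths below) indirect through a per-controller operations table. For reference, the structure these call sites assume matches struct tb_nhi_ops as declared in nhi.h, along these lines:

/* Per-controller hooks; every caller checks the pointer first,
 * so all members are optional. */
struct tb_nhi_ops {
	int (*init)(struct tb_nhi *nhi);
	int (*suspend_noirq)(struct tb_nhi *nhi, bool wakeup);
	int (*resume_noirq)(struct tb_nhi *nhi);
	int (*runtime_suspend)(struct tb_nhi *nhi);
	int (*runtime_resume)(struct tb_nhi *nhi);
	void (*shutdown)(struct tb_nhi *nhi);
};
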
@@ -898,16 +934,24 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
 
 	/*
 	 * Check that the device is still there. It may be that the user
 	 * unplugged last device which causes the host controller to go
 	 * away on PCs.
	 */
-	if (!pci_device_is_present(pdev))
-		tb->nhi->going_away = true;
-	else
+	if (!pci_device_is_present(pdev)) {
+		nhi->going_away = true;
+	} else {
+		if (nhi->ops && nhi->ops->resume_noirq) {
+			ret = nhi->ops->resume_noirq(nhi);
+			if (ret)
+				return ret;
+		}
 		nhi_enable_int_throttling(tb->nhi);
+	}
 
 	return tb_domain_resume_noirq(tb);
 }
@@ -940,23 +984,43 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
 
-	return tb_domain_runtime_suspend(tb);
+	ret = tb_domain_runtime_suspend(tb);
+	if (ret)
+		return ret;
+
+	if (nhi->ops && nhi->ops->runtime_suspend) {
+		ret = nhi->ops->runtime_suspend(tb->nhi);
+		if (ret)
+			return ret;
+	}
+	return 0;
 }
 
 static int nhi_runtime_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
 
-	nhi_enable_int_throttling(tb->nhi);
+	if (nhi->ops && nhi->ops->runtime_resume) {
+		ret = nhi->ops->runtime_resume(nhi);
+		if (ret)
+			return ret;
+	}
+
+	nhi_enable_int_throttling(nhi);
 	return tb_domain_runtime_resume(tb);
 }
 
 static void nhi_shutdown(struct tb_nhi *nhi)
 {
 	int i;
-	dev_info(&nhi->pdev->dev, "shutdown\n");
+
+	dev_dbg(&nhi->pdev->dev, "shutdown\n");
 
 	for (i = 0; i < nhi->hop_count; i++) {
 		if (nhi->tx_rings[i])
@@ -976,6 +1040,9 @@
 		flush_work(&nhi->interrupt_work);
 	}
 	ida_destroy(&nhi->msix_ida);
+
+	if (nhi->ops && nhi->ops->shutdown)
+		nhi->ops->shutdown(nhi);
 }
 
 static int nhi_init_msi(struct tb_nhi *nhi)
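
Since every call site is guarded with nhi->ops && nhi->ops-><hook>, a controller implements only the hooks it needs. A minimal sketch of such a table (example_nhi_ops is hypothetical; the in-tree user wired up in the ID table below is icl_nhi_ops from nhi_ops.c):

/* Hypothetical minimal implementation: only two hooks provided,
 * all other callbacks stay NULL and are simply skipped. */
static int example_nhi_init(struct tb_nhi *nhi)
{
	/* one-time controller setup run from nhi_probe() */
	return 0;
}

static int example_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
{
	/* arm or disarm controller wakes according to @wakeup */
	return 0;
}

static const struct tb_nhi_ops example_nhi_ops = {
	.init = example_nhi_init,
	.suspend_noirq = example_nhi_suspend_noirq,
};
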
@@ -1020,11 +1087,89 @@
 	return 0;
 }
 
+static bool nhi_imr_valid(struct pci_dev *pdev)
+{
+	u8 val;
+
+	if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
+		return !!val;
+
+	return true;
+}
+
+/*
+ * During suspend the Thunderbolt controller is reset and all PCIe
+ * tunnels are lost. The NHI driver will try to reestablish all tunnels
+ * during resume. This adds device links between the tunneled PCIe
+ * downstream ports and the NHI so that the device core will make sure
+ * NHI is resumed first before the rest.
+ */
+static void tb_apple_add_links(struct tb_nhi *nhi)
+{
+	struct pci_dev *upstream, *pdev;
+
+	if (!x86_apple_machine)
+		return;
+
+	switch (nhi->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+		break;
+	default:
+		return;
+	}
+
+	upstream = pci_upstream_bridge(nhi->pdev);
+	while (upstream) {
+		if (!pci_is_pcie(upstream))
+			return;
+		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
+			break;
+		upstream = pci_upstream_bridge(upstream);
+	}
+
+	if (!upstream)
+		return;
+
+	/*
+	 * For each hotplug downstream port, create a device link
+	 * back to NHI so that PCIe tunnels can be re-established after
+	 * sleep.
+	 */
+	for_each_pci_bridge(pdev, upstream->subordinate) {
+		const struct device_link *link;
+
+		if (!pci_is_pcie(pdev))
+			continue;
+		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
+		    !pdev->is_hotplug_bridge)
+			continue;
+
+		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
+				       DL_FLAG_AUTOREMOVE_SUPPLIER |
+				       DL_FLAG_PM_RUNTIME);
+		if (link) {
+			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
+				dev_name(&pdev->dev));
+		} else {
+			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
+				 dev_name(&pdev->dev));
+		}
+	}
+}
+
 static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct tb_nhi *nhi;
 	struct tb *tb;
 	int res;
+
+	if (!nhi_imr_valid(pdev)) {
+		dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
+		return -ENODEV;
+	}
 
 	res = pcim_enable_device(pdev);
 	if (res) {
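
Each link makes the tunneled downstream port a PM consumer of the NHI: DL_FLAG_PM_RUNTIME tells the driver core to resume the NHI before the port (so the tunnels can be re-established first), and DL_FLAG_AUTOREMOVE_SUPPLIER removes the links automatically when the NHI driver unbinds.
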
@@ -1043,12 +1188,11 @@
 		return -ENOMEM;
 
 	nhi->pdev = pdev;
+	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
 	/* cannot fail - table is allocated in pcim_iomap_regions */
 	nhi->iobase = pcim_iomap_table(pdev)[0];
 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
-	if (nhi->hop_count != 12 && nhi->hop_count != 32)
-		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
-			 nhi->hop_count);
+	dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
 
 	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
 				     sizeof(*nhi->tx_rings), GFP_KERNEL);
@@ -1075,6 +1219,15 @@
 
 	pci_set_master(pdev);
 
+	if (nhi->ops && nhi->ops->init) {
+		res = nhi->ops->init(nhi);
+		if (res)
+			return res;
+	}
+
+	tb_apple_add_links(nhi);
+	tb_acpi_add_links(nhi);
+
 	tb = icm_probe(nhi);
 	if (!tb)
 		tb = tb_probe(nhi);
@@ -1084,7 +1237,7 @@
 		return -ENODEV;
 	}
 
-	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+	dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
 
 	res = tb_domain_add(tb);
 	if (res) {
@@ -1097,6 +1250,8 @@
 		return res;
 	}
 	pci_set_drvdata(pdev, tb);
+
+	device_wakeup_enable(&pdev->dev);
 
 	pm_runtime_allow(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
@@ -1127,14 +1282,14 @@
 static const struct dev_pm_ops nhi_pm_ops = {
 	.suspend_noirq = nhi_suspend_noirq,
 	.resume_noirq = nhi_resume_noirq,
-	.freeze_noirq = nhi_suspend_noirq, /*
+	.freeze_noirq = nhi_freeze_noirq, /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
-	.thaw_noirq = nhi_resume_noirq,
+	.thaw_noirq = nhi_thaw_noirq,
 	.restore_noirq = nhi_resume_noirq,
 	.suspend = nhi_suspend,
-	.freeze = nhi_suspend,
+	.poweroff_noirq = nhi_poweroff_noirq,
 	.poweroff = nhi_suspend,
 	.complete = nhi_complete,
 	.runtime_suspend = nhi_runtime_suspend,
@@ -1182,6 +1337,21 @@
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+
+	/* Any USB4 compliant host */
+	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
 
 	{ 0,}
 };
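
The new class-match entry lets the driver bind to any USB4-compliant host interface without adding per-device IDs. PCI_DEVICE_CLASS() with a ~0 mask matches the full 24-bit class code and wildcards everything else; the entry is roughly equivalent to:

/* Match class 0x0c0340 (Serial bus controller: USB4 NHI) exactly,
 * leaving vendor/device/subsystem IDs as wildcards. */
{ .class = PCI_CLASS_SERIAL_USB_USB4, .class_mask = ~0,
  .vendor = PCI_ANY_ID, .device = PCI_ANY_ID,
  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
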
@@ -1194,6 +1364,7 @@
 	.id_table = nhi_ids,
 	.probe = nhi_probe,
 	.remove = nhi_remove,
+	.shutdown = nhi_remove,
 	.driver.pm = &nhi_pm_ops,
 };
 
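
Pointing .shutdown at nhi_remove gives the driver the same teardown on system shutdown and kexec as on unbind, so the rings and the domain are stopped before the next kernel takes over.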