2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
diff --git a/kernel/drivers/thunderbolt/nhi.c b/kernel/drivers/thunderbolt/nhi.c
--- a/kernel/drivers/thunderbolt/nhi.c
+++ b/kernel/drivers/thunderbolt/nhi.c
@@ -1,10 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Thunderbolt Cactus Ridge driver - NHI driver
+ * Thunderbolt driver - NHI driver
  *
  * The NHI (native host interface) is the pci device that allows us to send and
  * receive frames from the thunderbolt bus.
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */

 #include <linux/pm_runtime.h>
@@ -14,6 +16,8 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/property.h>
+#include <linux/platform_data/x86/apple.h>

 #include "nhi.h"
 #include "nhi_regs.h"
@@ -21,13 +25,7 @@

 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

-/*
- * Used to enable end-to-end workaround for missing RX packets. Do not
- * use this ring for anything else.
- */
-#define RING_E2E_UNUSED_HOPID	2
-/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
-#define RING_FIRST_USABLE_HOPID	8
+#define RING_FIRST_USABLE_HOPID	1

 /*
  * Minimal number of vectors when we use MSI-X. Two for control channel
@@ -38,7 +36,7 @@

 #define NHI_MAILBOX_TIMEOUT	500 /* ms */

-static int ring_interrupt_index(struct tb_ring *ring)
+static int ring_interrupt_index(const struct tb_ring *ring)
 {
 	int bit = ring->hop;
 	if (!ring->is_tx)
@@ -95,9 +93,9 @@
 	else
 		new = old & ~mask;

-	dev_info(&ring->nhi->pdev->dev,
-		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
-		 active ? "enabling" : "disabling", reg, bit, old, new);
+	dev_dbg(&ring->nhi->pdev->dev,
+		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
+		active ? "enabling" : "disabling", reg, bit, old, new);

 	if (new == old)
 		dev_WARN(&ring->nhi->pdev->dev,
@@ -449,7 +447,7 @@

 		/*
 		 * Automatically allocate HopID from the non-reserved
-		 * range 8 .. hop_count - 1.
+		 * range 1 .. hop_count - 1.
 		 */
 		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
 			if (ring->is_tx) {
@@ -501,12 +499,9 @@
 				     void *poll_data)
 {
 	struct tb_ring *ring = NULL;
-	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
-		 transmit ? "TX" : "RX", hop, size);

-	/* Tx Ring 2 is reserved for E2E workaround */
-	if (transmit && hop == RING_E2E_UNUSED_HOPID)
-		return NULL;
+	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
+		transmit ? "TX" : "RX", hop, size);

 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
@@ -610,8 +605,8 @@
 		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
 		goto err;
 	}
-	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
-		 RING_TYPE(ring), ring->hop);
+	dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
+		RING_TYPE(ring), ring->hop);

 	if (ring->flags & RING_FLAG_FRAME) {
 		/* Means 4096 */
@@ -620,19 +615,6 @@
 	} else {
 		frame_size = TB_FRAME_SIZE;
 		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
-	}
-
-	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
-		u32 hop;
-
-		/*
-		 * In order not to lose Rx packets we enable end-to-end
-		 * workaround which transfers Rx credits to an unused Tx
-		 * HopID.
-		 */
-		hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
-		hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
-		flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
 	}

 	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
@@ -672,8 +654,8 @@
 {
 	spin_lock_irq(&ring->nhi->lock);
 	spin_lock(&ring->lock);
-	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
-		 RING_TYPE(ring), ring->hop);
+	dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
+		RING_TYPE(ring), ring->hop);
 	if (ring->nhi->going_away)
 		goto err;
 	if (!ring->running) {
@@ -741,10 +723,8 @@
 	ring->descriptors_dma = 0;


-	dev_info(&ring->nhi->pdev->dev,
-		 "freeing %s %d\n",
-		 RING_TYPE(ring),
-		 ring->hop);
+	dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
+		ring->hop);

 	/**
 	 * ring->work can no longer be scheduled (it is scheduled only
@@ -870,12 +850,68 @@
 	return IRQ_HANDLED;
 }

+static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
+
+	ret = tb_domain_suspend_noirq(tb);
+	if (ret)
+		return ret;
+
+	if (nhi->ops && nhi->ops->suspend_noirq) {
+		ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int nhi_suspend_noirq(struct device *dev)
+{
+	return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
+}
+
+static int nhi_freeze_noirq(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);

-	return tb_domain_suspend_noirq(tb);
+	return tb_domain_freeze_noirq(tb);
+}
+
+static int nhi_thaw_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	return tb_domain_thaw_noirq(tb);
+}
+
+static bool nhi_wake_supported(struct pci_dev *pdev)
+{
+	u8 val;
+
+	/*
+	 * If power rails are sustainable for wakeup from S4 this
+	 * property is set by the BIOS.
+	 */
+	if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
+		return !!val;
+
+	return true;
+}
+
+static int nhi_poweroff_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	bool wakeup;
+
+	wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
+	return __nhi_suspend_noirq(dev, wakeup);
 }

 static void nhi_enable_int_throttling(struct tb_nhi *nhi)
@@ -898,16 +934,24 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;

 	/*
 	 * Check that the device is still there. It may be that the user
 	 * unplugged last device which causes the host controller to go
 	 * away on PCs.
 	 */
-	if (!pci_device_is_present(pdev))
-		tb->nhi->going_away = true;
-	else
+	if (!pci_device_is_present(pdev)) {
+		nhi->going_away = true;
+	} else {
+		if (nhi->ops && nhi->ops->resume_noirq) {
+			ret = nhi->ops->resume_noirq(nhi);
+			if (ret)
+				return ret;
+		}
 		nhi_enable_int_throttling(tb->nhi);
+	}

 	return tb_domain_resume_noirq(tb);
 }
@@ -940,23 +984,43 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;

-	return tb_domain_runtime_suspend(tb);
+	ret = tb_domain_runtime_suspend(tb);
+	if (ret)
+		return ret;
+
+	if (nhi->ops && nhi->ops->runtime_suspend) {
+		ret = nhi->ops->runtime_suspend(tb->nhi);
+		if (ret)
+			return ret;
+	}
+	return 0;
 }

 static int nhi_runtime_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;

-	nhi_enable_int_throttling(tb->nhi);
+	if (nhi->ops && nhi->ops->runtime_resume) {
+		ret = nhi->ops->runtime_resume(nhi);
+		if (ret)
+			return ret;
+	}
+
+	nhi_enable_int_throttling(nhi);
 	return tb_domain_runtime_resume(tb);
 }

 static void nhi_shutdown(struct tb_nhi *nhi)
 {
 	int i;
-	dev_info(&nhi->pdev->dev, "shutdown\n");
+
+	dev_dbg(&nhi->pdev->dev, "shutdown\n");

 	for (i = 0; i < nhi->hop_count; i++) {
 		if (nhi->tx_rings[i])
@@ -976,6 +1040,9 @@
 		flush_work(&nhi->interrupt_work);
 	}
 	ida_destroy(&nhi->msix_ida);
+
+	if (nhi->ops && nhi->ops->shutdown)
+		nhi->ops->shutdown(nhi);
 }

 static int nhi_init_msi(struct tb_nhi *nhi)
@@ -1020,11 +1087,89 @@
 	return 0;
 }

+static bool nhi_imr_valid(struct pci_dev *pdev)
+{
+	u8 val;
+
+	if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
+		return !!val;
+
+	return true;
+}
+
+/*
+ * During suspend the Thunderbolt controller is reset and all PCIe
+ * tunnels are lost. The NHI driver will try to reestablish all tunnels
+ * during resume. This adds device links between the tunneled PCIe
+ * downstream ports and the NHI so that the device core will make sure
+ * NHI is resumed first before the rest.
+ */
+static void tb_apple_add_links(struct tb_nhi *nhi)
+{
+	struct pci_dev *upstream, *pdev;
+
+	if (!x86_apple_machine)
+		return;
+
+	switch (nhi->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+		break;
+	default:
+		return;
+	}
+
+	upstream = pci_upstream_bridge(nhi->pdev);
+	while (upstream) {
+		if (!pci_is_pcie(upstream))
+			return;
+		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
+			break;
+		upstream = pci_upstream_bridge(upstream);
+	}
+
+	if (!upstream)
+		return;
+
+	/*
+	 * For each hotplug downstream port, create add device link
+	 * back to NHI so that PCIe tunnels can be re-established after
+	 * sleep.
+	 */
+	for_each_pci_bridge(pdev, upstream->subordinate) {
+		const struct device_link *link;
+
+		if (!pci_is_pcie(pdev))
+			continue;
+		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
+		    !pdev->is_hotplug_bridge)
+			continue;
+
+		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
+				       DL_FLAG_AUTOREMOVE_SUPPLIER |
+				       DL_FLAG_PM_RUNTIME);
+		if (link) {
+			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
+				dev_name(&pdev->dev));
+		} else {
+			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
+				 dev_name(&pdev->dev));
+		}
+	}
+}
+
 static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct tb_nhi *nhi;
 	struct tb *tb;
 	int res;
+
+	if (!nhi_imr_valid(pdev)) {
+		dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
+		return -ENODEV;
+	}

 	res = pcim_enable_device(pdev);
 	if (res) {
@@ -1043,12 +1188,11 @@
 		return -ENOMEM;

 	nhi->pdev = pdev;
+	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
 	/* cannot fail - table is allocated bin pcim_iomap_regions */
 	nhi->iobase = pcim_iomap_table(pdev)[0];
 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
-	if (nhi->hop_count != 12 && nhi->hop_count != 32)
-		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
-			 nhi->hop_count);
+	dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);

 	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
 				     sizeof(*nhi->tx_rings), GFP_KERNEL);
@@ -1075,6 +1219,15 @@

 	pci_set_master(pdev);

+	if (nhi->ops && nhi->ops->init) {
+		res = nhi->ops->init(nhi);
+		if (res)
+			return res;
+	}
+
+	tb_apple_add_links(nhi);
+	tb_acpi_add_links(nhi);
+
 	tb = icm_probe(nhi);
 	if (!tb)
 		tb = tb_probe(nhi);
@@ -1084,7 +1237,7 @@
 		return -ENODEV;
 	}

-	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+	dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");

 	res = tb_domain_add(tb);
 	if (res) {
@@ -1097,6 +1250,8 @@
 		return res;
 	}
 	pci_set_drvdata(pdev, tb);
+
+	device_wakeup_enable(&pdev->dev);

 	pm_runtime_allow(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
@@ -1127,14 +1282,14 @@
 static const struct dev_pm_ops nhi_pm_ops = {
 	.suspend_noirq = nhi_suspend_noirq,
 	.resume_noirq = nhi_resume_noirq,
-	.freeze_noirq = nhi_suspend_noirq, /*
+	.freeze_noirq = nhi_freeze_noirq, /*
 					    * we just disable hotplug, the
 					    * pci-tunnels stay alive.
 					    */
-	.thaw_noirq = nhi_resume_noirq,
+	.thaw_noirq = nhi_thaw_noirq,
 	.restore_noirq = nhi_resume_noirq,
 	.suspend = nhi_suspend,
-	.freeze = nhi_suspend,
+	.poweroff_noirq = nhi_poweroff_noirq,
 	.poweroff = nhi_suspend,
 	.complete = nhi_complete,
 	.runtime_suspend = nhi_runtime_suspend,
@@ -1182,6 +1337,21 @@
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+
+	/* Any USB4 compliant host */
+	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },

 	{ 0,}
 };
@@ -1194,6 +1364,7 @@
 	.id_table = nhi_ids,
 	.probe = nhi_probe,
 	.remove = nhi_remove,
+	.shutdown = nhi_remove,
 	.driver.pm = &nhi_pm_ops,
 };

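
Note: the optional nhi->ops hooks used throughout this patch (init, suspend_noirq, resume_noirq, runtime_suspend, runtime_resume, shutdown) are not defined in nhi.c; the structure comes from the driver's nhi.h header, which this diff does not touch. The sketch below is illustrative only, with the signatures inferred from the call sites above; the authoritative definition is the one in nhi.h.

	/*
	 * Illustrative sketch (not part of the patch): per-controller
	 * hooks dispatched through nhi->ops. Signatures are inferred
	 * from how nhi.c invokes them above.
	 */
	struct tb_nhi_ops {
		int (*init)(struct tb_nhi *nhi);
		int (*suspend_noirq)(struct tb_nhi *nhi, bool wakeup);
		int (*resume_noirq)(struct tb_nhi *nhi);
		int (*runtime_suspend)(struct tb_nhi *nhi);
		int (*runtime_resume)(struct tb_nhi *nhi);
		void (*shutdown)(struct tb_nhi *nhi);
	};

	/* The ICL/TGL entries in nhi_ids carry this table via .driver_data. */
	extern const struct tb_nhi_ops icl_nhi_ops;

In nhi_probe() the table is recovered with nhi->ops = (const struct tb_nhi_ops *)id->driver_data, and every call site checks nhi->ops && nhi->ops-><hook> before dispatching, so controllers whose ID table entry carries no ops (including the generic USB4 class match) keep working unchanged.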