2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/drivers/thunderbolt/xdomain.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Thunderbolt XDomain discovery protocol support
  *
  * Copyright (C) 2017, Intel Corporation
  * Authors: Michael Jamet <michael.jamet@intel.com>
  *          Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/device.h>
@@ -21,6 +18,7 @@
 #include "tb.h"
 
 #define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
+#define XDOMAIN_UUID_RETRIES			10
 #define XDOMAIN_PROPERTIES_RETRIES		60
 #define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10
 
@@ -223,6 +221,50 @@
 	}
 
 	return 0;
+}
+
+static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
+			       uuid_t *uuid)
+{
+	struct tb_xdp_uuid_response res;
+	struct tb_xdp_uuid req;
+	int ret;
+
+	memset(&req, 0, sizeof(req));
+	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
+			   sizeof(req));
+
+	memset(&res, 0, sizeof(res));
+	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+				   TB_CFG_PKG_XDOMAIN_RESP,
+				   XDOMAIN_DEFAULT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	ret = tb_xdp_handle_error(&res.hdr);
+	if (ret)
+		return ret;
+
+	uuid_copy(uuid, &res.src_uuid);
+	return 0;
+}
+
+static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
+				const uuid_t *uuid)
+{
+	struct tb_xdp_uuid_response res;
+
+	memset(&res, 0, sizeof(res));
+	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
+			   sizeof(res));
+
+	uuid_copy(&res.src_uuid, uuid);
+	res.src_route_hi = upper_32_bits(route);
+	res.src_route_lo = lower_32_bits(route);
+
+	return __tb_xdomain_response(ctl, &res, sizeof(res),
+				     TB_CFG_PKG_XDOMAIN_RESP);
 }
 
 static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
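Note: the two helpers above only fill in and parse the XDP UUID messages; the message layouts and the UUID_REQUEST/UUID_RESPONSE packet types come from tb_msgs.h rather than from this file. As a reading aid, the layouts are roughly the following (reproduced from the mainline header as an assumption, not part of this patch):

/* Reference sketch only; the real definitions live in drivers/thunderbolt/tb_msgs.h. */
struct tb_xdp_uuid {
	struct tb_xdp_header hdr;
};

struct tb_xdp_uuid_response {
	struct tb_xdp_header hdr;
	uuid_t src_uuid;
	u32 src_route_hi;
	u32 src_route_lo;
};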
@@ -459,6 +501,55 @@
 }
 EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
 
+static int rebuild_property_block(void)
+{
+	u32 *block, len;
+	int ret;
+
+	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	len = ret;
+
+	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
+	if (!block)
+		return -ENOMEM;
+
+	ret = tb_property_format_dir(xdomain_property_dir, block, len);
+	if (ret) {
+		kfree(block);
+		return ret;
+	}
+
+	kfree(xdomain_property_block);
+	xdomain_property_block = block;
+	xdomain_property_block_len = len;
+	xdomain_property_block_gen++;
+
+	return 0;
+}
+
+static void finalize_property_block(void)
+{
+	const struct tb_property *nodename;
+
+	/*
+	 * On the first XDomain connection we set up the system
+	 * nodename. This is delayed here because userspace may not
+	 * have it set when the driver is first probed.
+	 */
+	mutex_lock(&xdomain_lock);
+	nodename = tb_property_find(xdomain_property_dir, "deviceid",
+				    TB_PROPERTY_TYPE_TEXT);
+	if (!nodename) {
+		tb_property_add_text(xdomain_property_dir, "deviceid",
+				     utsname()->nodename);
+		rebuild_property_block();
+	}
+	mutex_unlock(&xdomain_lock);
+}
+
 static void tb_xdp_handle_request(struct work_struct *work)
 {
 	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
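Note: rebuild_property_block() regenerates the wire-format property block with the usual two-pass pattern (size query with a NULL buffer, then the real formatting) and expects the caller to serialize access with xdomain_lock, as finalize_property_block() above does. A minimal sketch of how another caller would be expected to pair the lock with a rebuild; the helper and the property key are hypothetical, shown only to illustrate the pattern:

/* Illustrative only, not part of this patch: add a property and regenerate
 * the block under the same lock that finalize_property_block() takes. */
static int example_add_property(const char *key, u32 value)
{
	int ret;

	mutex_lock(&xdomain_lock);
	tb_property_add_immediate(xdomain_property_dir, key, value);
	ret = rebuild_property_block();
	mutex_unlock(&xdomain_lock);

	return ret;
}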
@@ -486,6 +577,8 @@
 		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
 		goto out;
 	}
+
+	finalize_property_block();
 
 	switch (pkg->type) {
 	case PROPERTIES_REQUEST:
@@ -515,7 +608,14 @@
 		break;
 	}
 
+	case UUID_REQUEST_OLD:
+	case UUID_REQUEST:
+		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
+		break;
+
 	default:
+		tb_xdp_error_response(ctl, route, sequence,
+				      ERROR_NOT_SUPPORTED);
 		break;
 	}
 
@@ -527,9 +627,11 @@
 out:
 	kfree(xw->pkg);
 	kfree(xw);
+
+	tb_domain_put(tb);
 }
 
-static void
+static bool
 tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
 			size_t size)
 {
@@ -537,13 +639,18 @@
 
 	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
 	if (!xw)
-		return;
+		return false;
 
 	INIT_WORK(&xw->work, tb_xdp_handle_request);
 	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
-	xw->tb = tb;
+	if (!xw->pkg) {
+		kfree(xw);
+		return false;
+	}
+	xw->tb = tb_domain_get(tb);
 
-	queue_work(tb->wq, &xw->work);
+	schedule_work(&xw->work);
+	return true;
 }
 
 /**
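Note: the two hunks above make the deferred request work hold a reference to the domain (tb_domain_get() when queuing, tb_domain_put() at the end of tb_xdp_handle_request()) and let the caller report whether the packet was actually consumed. A condensed sketch of that lifetime pattern with hypothetical names, only to make the get/put pairing explicit:

/* Illustrative pattern, not part of this patch: pin the object before the
 * work is queued, release it when the work function is done with it. */
struct example_work {
	struct work_struct work;
	struct tb *tb;			/* reference held while queued */
};

static void example_work_fn(struct work_struct *work)
{
	struct example_work *ew = container_of(work, typeof(*ew), work);

	/* ... handle the request using ew->tb ... */

	tb_domain_put(ew->tb);		/* balances tb_domain_get() below */
	kfree(ew);
}

static bool example_schedule(struct tb *tb)
{
	struct example_work *ew;

	ew = kmalloc(sizeof(*ew), GFP_KERNEL);
	if (!ew)
		return false;

	INIT_WORK(&ew->work, example_work_fn);
	ew->tb = tb_domain_get(tb);
	schedule_work(&ew->work);
	return true;
}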
@@ -580,7 +687,7 @@
 	 * It should be null terminated but anything else is pretty much
 	 * allowed.
 	 */
-	return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
+	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
 }
 static DEVICE_ATTR_RO(key);
 
@@ -836,6 +943,55 @@
 	}
 }
 
+static void tb_xdomain_get_uuid(struct work_struct *work)
+{
+	struct tb_xdomain *xd = container_of(work, typeof(*xd),
+					     get_uuid_work.work);
+	struct tb *tb = xd->tb;
+	uuid_t uuid;
+	int ret;
+
+	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
+	if (ret < 0) {
+		if (xd->uuid_retries-- > 0) {
+			queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
+					   msecs_to_jiffies(100));
+		} else {
+			dev_dbg(&xd->dev, "failed to read remote UUID\n");
+		}
+		return;
+	}
+
+	if (uuid_equal(&uuid, xd->local_uuid)) {
+		dev_dbg(&xd->dev, "intra-domain loop detected\n");
+		return;
+	}
+
+	/*
+	 * If the UUID is different, there is another domain connected
+	 * so mark this one unplugged and wait for the connection
+	 * manager to replace it.
+	 */
+	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
+		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
+		xd->is_unplugged = true;
+		return;
+	}
+
+	/* First time fill in the missing UUID */
+	if (!xd->remote_uuid) {
+		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
+		if (!xd->remote_uuid)
+			return;
+	}
+
+	/* Now we can start the normal properties exchange */
+	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+			   msecs_to_jiffies(100));
+	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+			   msecs_to_jiffies(1000));
+}
+
 static void tb_xdomain_get_properties(struct work_struct *work)
 {
 	struct tb_xdomain *xd = container_of(work, typeof(*xd),
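Note: tb_xdomain_get_uuid() relies on fields that the same change adds to struct tb_xdomain in include/linux/thunderbolt.h and that are not visible in this diff. Roughly, and stated here as an assumption for context:

/* Reference sketch of the fields assumed on struct tb_xdomain; the real
 * declarations are in include/linux/thunderbolt.h, not in this diff. */
struct tb_xdomain {
	/* ... existing fields ... */
	bool needs_uuid;			/* remote UUID not known yet */
	struct delayed_work get_uuid_work;	/* runs tb_xdomain_get_uuid() */
	int uuid_retries;			/* counts down from XDOMAIN_UUID_RETRIES */
	/* ... */
};

Once the remote UUID is known (or was supplied up front), the worker hands over to the existing properties exchange, which is why start_handshake() below only queues get_uuid_work when needs_uuid is set.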
@@ -1042,21 +1198,29 @@
 
 static void start_handshake(struct tb_xdomain *xd)
 {
+	xd->uuid_retries = XDOMAIN_UUID_RETRIES;
 	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
 	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
 
-	/* Start exchanging properties with the other host */
-	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
-			   msecs_to_jiffies(100));
-	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
-			   msecs_to_jiffies(1000));
+	if (xd->needs_uuid) {
+		queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
+				   msecs_to_jiffies(100));
+	} else {
+		/* Start exchanging properties with the other host */
+		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+				   msecs_to_jiffies(100));
+		queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+				   msecs_to_jiffies(1000));
+	}
 }
 
 static void stop_handshake(struct tb_xdomain *xd)
 {
+	xd->uuid_retries = 0;
 	xd->properties_retries = 0;
 	xd->properties_changed_retries = 0;
 
+	cancel_delayed_work_sync(&xd->get_uuid_work);
 	cancel_delayed_work_sync(&xd->get_properties_work);
 	cancel_delayed_work_sync(&xd->properties_changed_work);
 }
@@ -1099,7 +1263,7 @@
  * other domain is reached).
  * @route: Route string used to reach the other domain
  * @local_uuid: Our local domain UUID
- * @remote_uuid: UUID of the other domain
+ * @remote_uuid: UUID of the other domain (optional)
  *
  * Allocates new XDomain structure and returns pointer to that. The
 * object must be released by calling tb_xdomain_put().
@@ -1108,7 +1272,13 @@
 			  u64 route, const uuid_t *local_uuid,
 			  const uuid_t *remote_uuid)
 {
+	struct tb_switch *parent_sw = tb_to_switch(parent);
 	struct tb_xdomain *xd;
+	struct tb_port *down;
+
+	/* Make sure the downstream domain is accessible */
+	down = tb_port_at(route, parent_sw);
+	tb_port_unlock(down);
 
 	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
 	if (!xd)
@@ -1118,6 +1288,7 @@
 	xd->route = route;
 	ida_init(&xd->service_ids);
 	mutex_init(&xd->lock);
+	INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
 	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
 	INIT_DELAYED_WORK(&xd->properties_changed_work,
 			  tb_xdomain_properties_changed);
@@ -1126,9 +1297,14 @@
 	if (!xd->local_uuid)
 		goto err_free;
 
-	xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
-	if (!xd->remote_uuid)
-		goto err_free_local_uuid;
+	if (remote_uuid) {
+		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
+					  GFP_KERNEL);
+		if (!xd->remote_uuid)
+			goto err_free_local_uuid;
+	} else {
+		xd->needs_uuid = true;
+	}
 
 	device_initialize(&xd->dev);
 	xd->dev.parent = get_device(parent);
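Note: with remote_uuid now optional, a connection manager can create the XDomain before it knows the remote UUID and let the driver discover it through the new exchange. A hedged usage sketch; the function and variable names are illustrative, not taken from this patch:

/* Illustrative caller (hypothetical): allocate an XDomain without the
 * remote UUID and let tb_xdomain_get_uuid() fill it in after the device
 * has been added. */
static void example_scan_xdomain(struct tb *tb, struct tb_switch *sw,
				 struct tb_port *port, u64 route)
{
	struct tb_xdomain *xd;

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		port->xdomain = xd;
		tb_xdomain_add(xd);
	}
}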
@@ -1286,20 +1462,17 @@
 static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
 					      const struct tb_xdomain_lookup *lookup)
 {
-	int i;
+	struct tb_port *port;
 
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		struct tb_port *port = &sw->ports[i];
+	tb_switch_for_each_port(sw, port) {
 		struct tb_xdomain *xd;
-
-		if (tb_is_upstream_port(port))
-			continue;
 
 		if (port->xdomain) {
 			xd = port->xdomain;
 
 			if (lookup->uuid) {
-				if (uuid_equal(xd->remote_uuid, lookup->uuid))
+				if (xd->remote_uuid &&
+				    uuid_equal(xd->remote_uuid, lookup->uuid))
 					return xd;
 			} else if (lookup->link &&
 				   lookup->link == xd->link &&
@@ -1309,7 +1482,7 @@
 				   lookup->route == xd->route) {
 				return xd;
 			}
-		} else if (port->remote) {
+		} else if (tb_port_has_remote(port)) {
 			xd = switch_find_xdomain(port->remote->sw, lookup);
 			if (xd)
 				return xd;
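Note: tb_switch_for_each_port() and tb_port_has_remote() are helpers from tb.h, not part of this patch. The loop macro is roughly equivalent to the open-coded iteration it replaces (reproduced from memory, so treat it as an assumption), and tb_port_has_remote() filters out upstream ports and ports without a remote, which is why the explicit tb_is_upstream_port() check could be dropped above:

/* Rough equivalent of the helper macro used above; see drivers/thunderbolt/tb.h. */
#define tb_switch_for_each_port(sw, p)					\
	for ((p) = &(sw)->ports[1];					\
	     (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)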
@@ -1426,10 +1599,8 @@
 	 * handlers in turn.
 	 */
 	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
-		if (type == TB_CFG_PKG_XDOMAIN_REQ) {
-			tb_xdp_schedule_request(tb, hdr, size);
-			return true;
-		}
+		if (type == TB_CFG_PKG_XDOMAIN_REQ)
+			return tb_xdp_schedule_request(tb, hdr, size);
 		return false;
 	}
 
@@ -1448,35 +1619,6 @@
 	mutex_unlock(&xdomain_lock);
 
 	return ret > 0;
-}
-
-static int rebuild_property_block(void)
-{
-	u32 *block, len;
-	int ret;
-
-	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
-	if (ret < 0)
-		return ret;
-
-	len = ret;
-
-	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
-	if (!block)
-		return -ENOMEM;
-
-	ret = tb_property_format_dir(xdomain_property_dir, block, len);
-	if (ret) {
-		kfree(block);
-		return ret;
-	}
-
-	kfree(xdomain_property_block);
-	xdomain_property_block = block;
-	xdomain_property_block_len = len;
-	xdomain_property_block_gen++;
-
-	return 0;
 }
 
 static int update_xdomain(struct device *dev, void *data)
@@ -1583,8 +1725,6 @@
 
 int tb_xdomain_init(void)
 {
-	int ret;
-
 	xdomain_property_dir = tb_property_create_dir(NULL);
 	if (!xdomain_property_dir)
 		return -ENOMEM;
@@ -1593,22 +1733,16 @@
 	 * Initialize standard set of properties without any service
 	 * directories. Those will be added by service drivers
 	 * themselves when they are loaded.
+	 *
+	 * We also add the node name later, when the first connection is made.
 	 */
 	tb_property_add_immediate(xdomain_property_dir, "vendorid",
 				  PCI_VENDOR_ID_INTEL);
 	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
 	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
-	tb_property_add_text(xdomain_property_dir, "deviceid",
-			     utsname()->nodename);
 	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
 
-	ret = rebuild_property_block();
-	if (ret) {
-		tb_property_free_dir(xdomain_property_dir);
-		xdomain_property_dir = NULL;
-	}
-
-	return ret;
+	return 0;
 }
 
 void tb_xdomain_exit(void)