.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
---|
1 | 2 | /* |
---|
2 | 3 | * Internal Thunderbolt Connection Manager. This is a firmware running on |
---|
3 | 4 | * the Thunderbolt host controller performing most of the low-level |
---|
.. | .. |
---|
6 | 7 | * Copyright (C) 2017, Intel Corporation |
---|
7 | 8 | * Authors: Michael Jamet <michael.jamet@intel.com> |
---|
8 | 9 | * Mika Westerberg <mika.westerberg@linux.intel.com> |
---|
9 | | - * |
---|
10 | | - * This program is free software; you can redistribute it and/or modify |
---|
11 | | - * it under the terms of the GNU General Public License version 2 as |
---|
12 | | - * published by the Free Software Foundation. |
---|
13 | 10 | */ |
---|
14 | 11 | |
---|
15 | 12 | #include <linux/delay.h> |
---|
16 | 13 | #include <linux/mutex.h> |
---|
| 14 | +#include <linux/moduleparam.h> |
---|
17 | 15 | #include <linux/pci.h> |
---|
18 | 16 | #include <linux/pm_runtime.h> |
---|
19 | 17 | #include <linux/platform_data/x86/apple.h> |
---|
.. | .. |
---|
45 | 43 | #define ICM_TIMEOUT 5000 /* ms */ |
---|
46 | 44 | #define ICM_APPROVE_TIMEOUT 10000 /* ms */ |
---|
47 | 45 | #define ICM_MAX_LINK 4 |
---|
48 | | -#define ICM_MAX_DEPTH 6 |
---|
| 46 | + |
---|
| 47 | +static bool start_icm; |
---|
| 48 | +module_param(start_icm, bool, 0444); |
---|
| 49 | +MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)"); |
---|
49 | 50 | |
---|
50 | 51 | /** |
---|
51 | 52 | * struct icm - Internal connection manager private data |
---|
.. | .. |
---|
59 | 60 | * @safe_mode: ICM is in safe mode |
---|
60 | 61 | * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) |
---|
61 | 62 | * @rpm: Does the controller support runtime PM (RTD3) |
---|
| 63 | + * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller |
---|
| 64 | + * @veto: Is RTD3 veto in effect |
---|
62 | 65 | * @is_supported: Checks if we can support ICM on this controller |
---|
| 66 | + * @cio_reset: Trigger CIO reset |
---|
63 | 67 | * @get_mode: Read and return the ICM firmware mode (optional) |
---|
64 | 68 | * @get_route: Find a route string for given switch |
---|
65 | 69 | * @save_devices: Ask ICM to save devices to ACL when suspending (optional) |
---|
66 | 70 | * @driver_ready: Send driver ready message to ICM |
---|
| 71 | + * @set_uuid: Set UUID for the root switch (optional) |
---|
67 | 72 | * @device_connected: Handle device connected ICM message |
---|
68 | 73 | * @device_disconnected: Handle device disconnected ICM message |
---|
69 | 74 | * @xdomain_connected - Handle XDomain connected ICM message |
---|
70 | 75 | * @xdomain_disconnected - Handle XDomain disconnected ICM message |
---|
| 76 | + * @rtd3_veto: Handle RTD3 veto notification ICM message |
---|
71 | 77 | */ |
---|
72 | 78 | struct icm { |
---|
73 | 79 | struct mutex request_lock; |
---|
.. | .. |
---|
77 | 83 | int vnd_cap; |
---|
78 | 84 | bool safe_mode; |
---|
79 | 85 | bool rpm; |
---|
| 86 | + bool can_upgrade_nvm; |
---|
| 87 | + bool veto; |
---|
80 | 88 | bool (*is_supported)(struct tb *tb); |
---|
| 89 | + int (*cio_reset)(struct tb *tb); |
---|
81 | 90 | int (*get_mode)(struct tb *tb); |
---|
82 | 91 | int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route); |
---|
83 | 92 | void (*save_devices)(struct tb *tb); |
---|
84 | 93 | int (*driver_ready)(struct tb *tb, |
---|
85 | 94 | enum tb_security_level *security_level, |
---|
86 | 95 | size_t *nboot_acl, bool *rpm); |
---|
| 96 | + void (*set_uuid)(struct tb *tb); |
---|
87 | 97 | void (*device_connected)(struct tb *tb, |
---|
88 | 98 | const struct icm_pkg_header *hdr); |
---|
89 | 99 | void (*device_disconnected)(struct tb *tb, |
---|
.. | .. |
---|
92 | 102 | const struct icm_pkg_header *hdr); |
---|
93 | 103 | void (*xdomain_disconnected)(struct tb *tb, |
---|
94 | 104 | const struct icm_pkg_header *hdr); |
---|
| 105 | + void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr); |
---|
95 | 106 | }; |
---|
96 | 107 | |
---|
97 | 108 | struct icm_notification { |
---|
.. | .. |
---|
103 | 114 | struct ep_name_entry { |
---|
104 | 115 | u8 len; |
---|
105 | 116 | u8 type; |
---|
106 | | - u8 data[0]; |
---|
| 117 | + u8 data[]; |
---|
107 | 118 | }; |
---|
108 | 119 | |
---|
109 | 120 | #define EP_NAME_INTEL_VSS 0x10 |
---|
.. | .. |
---|
141 | 152 | return NULL; |
---|
142 | 153 | } |
---|
143 | 154 | |
---|
| 155 | +static bool intel_vss_is_rtd3(const void *ep_name, size_t size) |
---|
| 156 | +{ |
---|
| 157 | + const struct intel_vss *vss; |
---|
| 158 | + |
---|
| 159 | + vss = parse_intel_vss(ep_name, size); |
---|
| 160 | + if (vss) |
---|
| 161 | + return !!(vss->flags & INTEL_VSS_FLAGS_RTD3); |
---|
| 162 | + |
---|
| 163 | + return false; |
---|
| 164 | +} |
---|
| 165 | + |
---|
144 | 166 | static inline struct tb *icm_to_tb(struct icm *icm) |
---|
145 | 167 | { |
---|
146 | 168 | return ((void *)icm - sizeof(struct tb)); |
---|
.. | .. |
---|
168 | 190 | { |
---|
169 | 191 | int depth = tb_route_length(route); |
---|
170 | 192 | return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0; |
---|
| 193 | +} |
---|
| 194 | + |
---|
| 195 | +static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec) |
---|
| 196 | +{ |
---|
| 197 | + unsigned long end = jiffies + msecs_to_jiffies(timeout_msec); |
---|
| 198 | + u32 cmd; |
---|
| 199 | + |
---|
| 200 | + do { |
---|
| 201 | + pci_read_config_dword(icm->upstream_port, |
---|
| 202 | + icm->vnd_cap + PCIE2CIO_CMD, &cmd); |
---|
| 203 | + if (!(cmd & PCIE2CIO_CMD_START)) { |
---|
| 204 | + if (cmd & PCIE2CIO_CMD_TIMEOUT) |
---|
| 205 | + break; |
---|
| 206 | + return 0; |
---|
| 207 | + } |
---|
| 208 | + |
---|
| 209 | + msleep(50); |
---|
| 210 | + } while (time_before(jiffies, end)); |
---|
| 211 | + |
---|
| 212 | + return -ETIMEDOUT; |
---|
| 213 | +} |
---|
| 214 | + |
---|
| 215 | +static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, |
---|
| 216 | + unsigned int port, unsigned int index, u32 *data) |
---|
| 217 | +{ |
---|
| 218 | + struct pci_dev *pdev = icm->upstream_port; |
---|
| 219 | + int ret, vnd_cap = icm->vnd_cap; |
---|
| 220 | + u32 cmd; |
---|
| 221 | + |
---|
| 222 | + cmd = index; |
---|
| 223 | + cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; |
---|
| 224 | + cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; |
---|
| 225 | + cmd |= PCIE2CIO_CMD_START; |
---|
| 226 | + pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); |
---|
| 227 | + |
---|
| 228 | + ret = pci2cio_wait_completion(icm, 5000); |
---|
| 229 | + if (ret) |
---|
| 230 | + return ret; |
---|
| 231 | + |
---|
| 232 | + pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data); |
---|
| 233 | + return 0; |
---|
| 234 | +} |
---|
| 235 | + |
---|
| 236 | +static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, |
---|
| 237 | + unsigned int port, unsigned int index, u32 data) |
---|
| 238 | +{ |
---|
| 239 | + struct pci_dev *pdev = icm->upstream_port; |
---|
| 240 | + int vnd_cap = icm->vnd_cap; |
---|
| 241 | + u32 cmd; |
---|
| 242 | + |
---|
| 243 | + pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data); |
---|
| 244 | + |
---|
| 245 | + cmd = index; |
---|
| 246 | + cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; |
---|
| 247 | + cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; |
---|
| 248 | + cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START; |
---|
| 249 | + pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); |
---|
| 250 | + |
---|
| 251 | + return pci2cio_wait_completion(icm, 5000); |
---|
171 | 252 | } |
---|
172 | 253 | |
---|
173 | 254 | static bool icm_match(const struct tb_cfg_request *req, |
---|
.. | .. |
---|
235 | 316 | } while (retries--); |
---|
236 | 317 | |
---|
237 | 318 | return -ETIMEDOUT; |
---|
| 319 | +} |
---|
| 320 | + |
---|
| 321 | +/* |
---|
| 322 | + * If rescan is queued to run (we are resuming), postpone it to give the |
---|
| 323 | + * firmware some more time to send device connected notifications for next |
---|
| 324 | + * devices in the chain. |
---|
| 325 | + */ |
---|
| 326 | +static void icm_postpone_rescan(struct tb *tb) |
---|
| 327 | +{ |
---|
| 328 | + struct icm *icm = tb_priv(tb); |
---|
| 329 | + |
---|
| 330 | + if (delayed_work_pending(&icm->rescan_work)) |
---|
| 331 | + mod_delayed_work(tb->wq, &icm->rescan_work, |
---|
| 332 | + msecs_to_jiffies(500)); |
---|
| 333 | +} |
---|
| 334 | + |
---|
| 335 | +static void icm_veto_begin(struct tb *tb) |
---|
| 336 | +{ |
---|
| 337 | + struct icm *icm = tb_priv(tb); |
---|
| 338 | + |
---|
| 339 | + if (!icm->veto) { |
---|
| 340 | + icm->veto = true; |
---|
| 341 | + /* Keep the domain powered while veto is in effect */ |
---|
| 342 | + pm_runtime_get(&tb->dev); |
---|
| 343 | + } |
---|
| 344 | +} |
---|
| 345 | + |
---|
| 346 | +static void icm_veto_end(struct tb *tb) |
---|
| 347 | +{ |
---|
| 348 | + struct icm *icm = tb_priv(tb); |
---|
| 349 | + |
---|
| 350 | + if (icm->veto) { |
---|
| 351 | + icm->veto = false; |
---|
| 352 | + /* Allow the domain suspend now */ |
---|
| 353 | + pm_runtime_mark_last_busy(&tb->dev); |
---|
| 354 | + pm_runtime_put_autosuspend(&tb->dev); |
---|
| 355 | + } |
---|
| 356 | +} |
---|
| 357 | + |
---|
| 358 | +static bool icm_firmware_running(const struct tb_nhi *nhi) |
---|
| 359 | +{ |
---|
| 360 | + u32 val; |
---|
| 361 | + |
---|
| 362 | + val = ioread32(nhi->iobase + REG_FW_STS); |
---|
| 363 | + return !!(val & REG_FW_STS_ICM_EN); |
---|
238 | 364 | } |
---|
239 | 365 | |
---|
240 | 366 | static bool icm_fr_is_supported(struct tb *tb) |
---|
.. | .. |
---|
460 | 586 | return 0; |
---|
461 | 587 | } |
---|
462 | 588 | |
---|
463 | | -static void add_switch(struct tb_switch *parent_sw, u64 route, |
---|
464 | | - const uuid_t *uuid, const u8 *ep_name, |
---|
465 | | - size_t ep_name_size, u8 connection_id, u8 connection_key, |
---|
466 | | - u8 link, u8 depth, enum tb_security_level security_level, |
---|
467 | | - bool authorized, bool boot) |
---|
| 589 | +static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route, |
---|
| 590 | + const uuid_t *uuid) |
---|
468 | 591 | { |
---|
469 | | - const struct intel_vss *vss; |
---|
| 592 | + struct tb *tb = parent_sw->tb; |
---|
470 | 593 | struct tb_switch *sw; |
---|
471 | 594 | |
---|
472 | | - pm_runtime_get_sync(&parent_sw->dev); |
---|
473 | | - |
---|
474 | | - sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route); |
---|
475 | | - if (!sw) |
---|
476 | | - goto out; |
---|
| 595 | + sw = tb_switch_alloc(tb, &parent_sw->dev, route); |
---|
| 596 | + if (IS_ERR(sw)) { |
---|
| 597 | + tb_warn(tb, "failed to allocate switch at %llx\n", route); |
---|
| 598 | + return sw; |
---|
| 599 | + } |
---|
477 | 600 | |
---|
478 | 601 | sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); |
---|
479 | 602 | if (!sw->uuid) { |
---|
480 | | - tb_sw_warn(sw, "cannot allocate memory for switch\n"); |
---|
481 | 603 | tb_switch_put(sw); |
---|
482 | | - goto out; |
---|
| 604 | + return ERR_PTR(-ENOMEM); |
---|
483 | 605 | } |
---|
484 | | - sw->connection_id = connection_id; |
---|
485 | | - sw->connection_key = connection_key; |
---|
486 | | - sw->link = link; |
---|
487 | | - sw->depth = depth; |
---|
488 | | - sw->authorized = authorized; |
---|
489 | | - sw->security_level = security_level; |
---|
490 | | - sw->boot = boot; |
---|
491 | 606 | |
---|
492 | | - vss = parse_intel_vss(ep_name, ep_name_size); |
---|
493 | | - if (vss) |
---|
494 | | - sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3); |
---|
| 607 | + init_completion(&sw->rpm_complete); |
---|
| 608 | + return sw; |
---|
| 609 | +} |
---|
| 610 | + |
---|
| 611 | +static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw) |
---|
| 612 | +{ |
---|
| 613 | + u64 route = tb_route(sw); |
---|
| 614 | + int ret; |
---|
495 | 615 | |
---|
496 | 616 | /* Link the two switches now */ |
---|
497 | 617 | tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); |
---|
498 | 618 | tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); |
---|
499 | 619 | |
---|
500 | | - if (tb_switch_add(sw)) { |
---|
| 620 | + ret = tb_switch_add(sw); |
---|
| 621 | + if (ret) |
---|
501 | 622 | tb_port_at(tb_route(sw), parent_sw)->remote = NULL; |
---|
502 | | - tb_switch_put(sw); |
---|
503 | | - } |
---|
504 | 623 | |
---|
505 | | -out: |
---|
506 | | - pm_runtime_mark_last_busy(&parent_sw->dev); |
---|
507 | | - pm_runtime_put_autosuspend(&parent_sw->dev); |
---|
| 624 | + return ret; |
---|
508 | 625 | } |
---|
509 | 626 | |
---|
510 | 627 | static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, |
---|
.. | .. |
---|
527 | 644 | |
---|
528 | 645 | /* This switch still exists */ |
---|
529 | 646 | sw->is_unplugged = false; |
---|
| 647 | + |
---|
| 648 | + /* Runtime resume is now complete */ |
---|
| 649 | + complete(&sw->rpm_complete); |
---|
530 | 650 | } |
---|
531 | 651 | |
---|
532 | 652 | static void remove_switch(struct tb_switch *sw) |
---|
.. | .. |
---|
585 | 705 | (const struct icm_fr_event_device_connected *)hdr; |
---|
586 | 706 | enum tb_security_level security_level; |
---|
587 | 707 | struct tb_switch *sw, *parent_sw; |
---|
| 708 | + bool boot, dual_lane, speed_gen3; |
---|
588 | 709 | struct icm *icm = tb_priv(tb); |
---|
589 | 710 | bool authorized = false; |
---|
590 | 711 | struct tb_xdomain *xd; |
---|
591 | 712 | u8 link, depth; |
---|
592 | | - bool boot; |
---|
593 | 713 | u64 route; |
---|
594 | 714 | int ret; |
---|
| 715 | + |
---|
| 716 | + icm_postpone_rescan(tb); |
---|
595 | 717 | |
---|
596 | 718 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; |
---|
597 | 719 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> |
---|
.. | .. |
---|
600 | 722 | security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> |
---|
601 | 723 | ICM_FLAGS_SLEVEL_SHIFT; |
---|
602 | 724 | boot = pkg->link_info & ICM_LINK_INFO_BOOT; |
---|
| 725 | + dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; |
---|
| 726 | + speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; |
---|
603 | 727 | |
---|
604 | 728 | if (pkg->link_info & ICM_LINK_INFO_REJECTED) { |
---|
605 | 729 | tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n", |
---|
.. | .. |
---|
697 | 821 | return; |
---|
698 | 822 | } |
---|
699 | 823 | |
---|
700 | | - add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, |
---|
701 | | - sizeof(pkg->ep_name), pkg->connection_id, |
---|
702 | | - pkg->connection_key, link, depth, security_level, |
---|
703 | | - authorized, boot); |
---|
| 824 | + pm_runtime_get_sync(&parent_sw->dev); |
---|
| 825 | + |
---|
| 826 | + sw = alloc_switch(parent_sw, route, &pkg->ep_uuid); |
---|
| 827 | + if (!IS_ERR(sw)) { |
---|
| 828 | + sw->connection_id = pkg->connection_id; |
---|
| 829 | + sw->connection_key = pkg->connection_key; |
---|
| 830 | + sw->link = link; |
---|
| 831 | + sw->depth = depth; |
---|
| 832 | + sw->authorized = authorized; |
---|
| 833 | + sw->security_level = security_level; |
---|
| 834 | + sw->boot = boot; |
---|
| 835 | + sw->link_speed = speed_gen3 ? 20 : 10; |
---|
| 836 | + sw->link_width = dual_lane ? 2 : 1; |
---|
| 837 | + sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name)); |
---|
| 838 | + |
---|
| 839 | + if (add_switch(parent_sw, sw)) |
---|
| 840 | + tb_switch_put(sw); |
---|
| 841 | + } |
---|
| 842 | + |
---|
| 843 | + pm_runtime_mark_last_busy(&parent_sw->dev); |
---|
| 844 | + pm_runtime_put_autosuspend(&parent_sw->dev); |
---|
704 | 845 | |
---|
705 | 846 | tb_switch_put(parent_sw); |
---|
706 | 847 | } |
---|
.. | .. |
---|
717 | 858 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> |
---|
718 | 859 | ICM_LINK_INFO_DEPTH_SHIFT; |
---|
719 | 860 | |
---|
720 | | - if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) { |
---|
| 861 | + if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { |
---|
721 | 862 | tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); |
---|
722 | 863 | return; |
---|
723 | 864 | } |
---|
.. | .. |
---|
747 | 888 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> |
---|
748 | 889 | ICM_LINK_INFO_DEPTH_SHIFT; |
---|
749 | 890 | |
---|
750 | | - if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) { |
---|
| 891 | + if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { |
---|
751 | 892 | tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); |
---|
752 | 893 | return; |
---|
753 | 894 | } |
---|
.. | .. |
---|
836 | 977 | remove_xdomain(xd); |
---|
837 | 978 | tb_xdomain_put(xd); |
---|
838 | 979 | } |
---|
| 980 | +} |
---|
| 981 | + |
---|
| 982 | +static int icm_tr_cio_reset(struct tb *tb) |
---|
| 983 | +{ |
---|
| 984 | + return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1)); |
---|
839 | 985 | } |
---|
840 | 986 | |
---|
841 | 987 | static int |
---|
.. | .. |
---|
1018 | 1164 | } |
---|
1019 | 1165 | |
---|
1020 | 1166 | static void |
---|
1021 | | -icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) |
---|
| 1167 | +__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr, |
---|
| 1168 | + bool force_rtd3) |
---|
1022 | 1169 | { |
---|
1023 | 1170 | const struct icm_tr_event_device_connected *pkg = |
---|
1024 | 1171 | (const struct icm_tr_event_device_connected *)hdr; |
---|
| 1172 | + bool authorized, boot, dual_lane, speed_gen3; |
---|
1025 | 1173 | enum tb_security_level security_level; |
---|
1026 | 1174 | struct tb_switch *sw, *parent_sw; |
---|
1027 | 1175 | struct tb_xdomain *xd; |
---|
1028 | | - bool authorized, boot; |
---|
1029 | 1176 | u64 route; |
---|
| 1177 | + |
---|
| 1178 | + icm_postpone_rescan(tb); |
---|
1030 | 1179 | |
---|
1031 | 1180 | /* |
---|
1032 | 1181 | * Currently we don't use the QoS information coming with the |
---|
.. | .. |
---|
1041 | 1190 | security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> |
---|
1042 | 1191 | ICM_FLAGS_SLEVEL_SHIFT; |
---|
1043 | 1192 | boot = pkg->link_info & ICM_LINK_INFO_BOOT; |
---|
| 1193 | + dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; |
---|
| 1194 | + speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; |
---|
1044 | 1195 | |
---|
1045 | 1196 | if (pkg->link_info & ICM_LINK_INFO_REJECTED) { |
---|
1046 | 1197 | tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n", |
---|
.. | .. |
---|
1083 | 1234 | return; |
---|
1084 | 1235 | } |
---|
1085 | 1236 | |
---|
1086 | | - add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, |
---|
1087 | | - sizeof(pkg->ep_name), pkg->connection_id, |
---|
1088 | | - 0, 0, 0, security_level, authorized, boot); |
---|
| 1237 | + pm_runtime_get_sync(&parent_sw->dev); |
---|
| 1238 | + |
---|
| 1239 | + sw = alloc_switch(parent_sw, route, &pkg->ep_uuid); |
---|
| 1240 | + if (!IS_ERR(sw)) { |
---|
| 1241 | + sw->connection_id = pkg->connection_id; |
---|
| 1242 | + sw->authorized = authorized; |
---|
| 1243 | + sw->security_level = security_level; |
---|
| 1244 | + sw->boot = boot; |
---|
| 1245 | + sw->link_speed = speed_gen3 ? 20 : 10; |
---|
| 1246 | + sw->link_width = dual_lane ? 2 : 1; |
---|
| 1247 | + sw->rpm = force_rtd3; |
---|
| 1248 | + if (!sw->rpm) |
---|
| 1249 | + sw->rpm = intel_vss_is_rtd3(pkg->ep_name, |
---|
| 1250 | + sizeof(pkg->ep_name)); |
---|
| 1251 | + |
---|
| 1252 | + if (add_switch(parent_sw, sw)) |
---|
| 1253 | + tb_switch_put(sw); |
---|
| 1254 | + } |
---|
| 1255 | + |
---|
| 1256 | + pm_runtime_mark_last_busy(&parent_sw->dev); |
---|
| 1257 | + pm_runtime_put_autosuspend(&parent_sw->dev); |
---|
1089 | 1258 | |
---|
1090 | 1259 | tb_switch_put(parent_sw); |
---|
| 1260 | +} |
---|
| 1261 | + |
---|
| 1262 | +static void |
---|
| 1263 | +icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) |
---|
| 1264 | +{ |
---|
| 1265 | + __icm_tr_device_connected(tb, hdr, false); |
---|
1091 | 1266 | } |
---|
1092 | 1267 | |
---|
1093 | 1268 | static void |
---|
.. | .. |
---|
1203 | 1378 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: |
---|
1204 | 1379 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: |
---|
1205 | 1380 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: |
---|
| 1381 | + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: |
---|
| 1382 | + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: |
---|
1206 | 1383 | return parent; |
---|
1207 | 1384 | } |
---|
1208 | 1385 | |
---|
.. | .. |
---|
1217 | 1394 | /* |
---|
1218 | 1395 | * Starting from Alpine Ridge we can use ICM on Apple machines |
---|
1219 | 1396 | * as well. We just need to reset and re-enable it first. |
---|
| 1397 | + * However, only start it if explicitly asked by the user. |
---|
1220 | 1398 | */ |
---|
1221 | | - if (!x86_apple_machine) |
---|
| 1399 | + if (icm_firmware_running(tb->nhi)) |
---|
1222 | 1400 | return true; |
---|
| 1401 | + if (!start_icm) |
---|
| 1402 | + return false; |
---|
1223 | 1403 | |
---|
1224 | 1404 | /* |
---|
1225 | 1405 | * Find the upstream PCIe port in case we need to do reset |
---|
.. | .. |
---|
1240 | 1420 | } |
---|
1241 | 1421 | |
---|
1242 | 1422 | return false; |
---|
| 1423 | +} |
---|
| 1424 | + |
---|
| 1425 | +static int icm_ar_cio_reset(struct tb *tb) |
---|
| 1426 | +{ |
---|
| 1427 | + return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9)); |
---|
1243 | 1428 | } |
---|
1244 | 1429 | |
---|
1245 | 1430 | static int icm_ar_get_mode(struct tb *tb) |
---|
.. | .. |
---|
1393 | 1578 | return 0; |
---|
1394 | 1579 | } |
---|
1395 | 1580 | |
---|
| 1581 | +static int |
---|
| 1582 | +icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, |
---|
| 1583 | + size_t *nboot_acl, bool *rpm) |
---|
| 1584 | +{ |
---|
| 1585 | + struct icm_tr_pkg_driver_ready_response reply; |
---|
| 1586 | + struct icm_pkg_driver_ready request = { |
---|
| 1587 | + .hdr.code = ICM_DRIVER_READY, |
---|
| 1588 | + }; |
---|
| 1589 | + int ret; |
---|
| 1590 | + |
---|
| 1591 | + memset(&reply, 0, sizeof(reply)); |
---|
| 1592 | + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), |
---|
| 1593 | + 1, 20000); |
---|
| 1594 | + if (ret) |
---|
| 1595 | + return ret; |
---|
| 1596 | + |
---|
| 1597 | + /* Ice Lake always supports RTD3 */ |
---|
| 1598 | + if (rpm) |
---|
| 1599 | + *rpm = true; |
---|
| 1600 | + |
---|
| 1601 | + return 0; |
---|
| 1602 | +} |
---|
| 1603 | + |
---|
| 1604 | +static void icm_icl_set_uuid(struct tb *tb) |
---|
| 1605 | +{ |
---|
| 1606 | + struct tb_nhi *nhi = tb->nhi; |
---|
| 1607 | + u32 uuid[4]; |
---|
| 1608 | + |
---|
| 1609 | + pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]); |
---|
| 1610 | + pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]); |
---|
| 1611 | + uuid[2] = 0xffffffff; |
---|
| 1612 | + uuid[3] = 0xffffffff; |
---|
| 1613 | + |
---|
| 1614 | + tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); |
---|
| 1615 | +} |
---|
| 1616 | + |
---|
| 1617 | +static void |
---|
| 1618 | +icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) |
---|
| 1619 | +{ |
---|
| 1620 | + __icm_tr_device_connected(tb, hdr, true); |
---|
| 1621 | +} |
---|
| 1622 | + |
---|
| 1623 | +static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr) |
---|
| 1624 | +{ |
---|
| 1625 | + const struct icm_icl_event_rtd3_veto *pkg = |
---|
| 1626 | + (const struct icm_icl_event_rtd3_veto *)hdr; |
---|
| 1627 | + |
---|
| 1628 | + tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason); |
---|
| 1629 | + |
---|
| 1630 | + if (pkg->veto_reason) |
---|
| 1631 | + icm_veto_begin(tb); |
---|
| 1632 | + else |
---|
| 1633 | + icm_veto_end(tb); |
---|
| 1634 | +} |
---|
| 1635 | + |
---|
| 1636 | +static bool icm_tgl_is_supported(struct tb *tb) |
---|
| 1637 | +{ |
---|
| 1638 | + u32 val; |
---|
| 1639 | + |
---|
| 1640 | + /* |
---|
| 1641 | + * If the firmware is not running use software CM. This platform |
---|
| 1642 | + * should fully support both. |
---|
| 1643 | + */ |
---|
| 1644 | + val = ioread32(tb->nhi->iobase + REG_FW_STS); |
---|
| 1645 | + return !!(val & REG_FW_STS_NVM_AUTH_DONE); |
---|
| 1646 | +} |
---|
| 1647 | + |
---|
1396 | 1648 | static void icm_handle_notification(struct work_struct *work) |
---|
1397 | 1649 | { |
---|
1398 | 1650 | struct icm_notification *n = container_of(work, typeof(*n), work); |
---|
.. | .. |
---|
1419 | 1671 | break; |
---|
1420 | 1672 | case ICM_EVENT_XDOMAIN_DISCONNECTED: |
---|
1421 | 1673 | icm->xdomain_disconnected(tb, n->pkg); |
---|
| 1674 | + break; |
---|
| 1675 | + case ICM_EVENT_RTD3_VETO: |
---|
| 1676 | + icm->rtd3_veto(tb, n->pkg); |
---|
1422 | 1677 | break; |
---|
1423 | 1678 | } |
---|
1424 | 1679 | } |
---|
.. | .. |
---|
1479 | 1734 | return -ETIMEDOUT; |
---|
1480 | 1735 | } |
---|
1481 | 1736 | |
---|
1482 | | -static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec) |
---|
1483 | | -{ |
---|
1484 | | - unsigned long end = jiffies + msecs_to_jiffies(timeout_msec); |
---|
1485 | | - u32 cmd; |
---|
1486 | | - |
---|
1487 | | - do { |
---|
1488 | | - pci_read_config_dword(icm->upstream_port, |
---|
1489 | | - icm->vnd_cap + PCIE2CIO_CMD, &cmd); |
---|
1490 | | - if (!(cmd & PCIE2CIO_CMD_START)) { |
---|
1491 | | - if (cmd & PCIE2CIO_CMD_TIMEOUT) |
---|
1492 | | - break; |
---|
1493 | | - return 0; |
---|
1494 | | - } |
---|
1495 | | - |
---|
1496 | | - msleep(50); |
---|
1497 | | - } while (time_before(jiffies, end)); |
---|
1498 | | - |
---|
1499 | | - return -ETIMEDOUT; |
---|
1500 | | -} |
---|
1501 | | - |
---|
1502 | | -static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, |
---|
1503 | | - unsigned int port, unsigned int index, u32 *data) |
---|
1504 | | -{ |
---|
1505 | | - struct pci_dev *pdev = icm->upstream_port; |
---|
1506 | | - int ret, vnd_cap = icm->vnd_cap; |
---|
1507 | | - u32 cmd; |
---|
1508 | | - |
---|
1509 | | - cmd = index; |
---|
1510 | | - cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; |
---|
1511 | | - cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; |
---|
1512 | | - cmd |= PCIE2CIO_CMD_START; |
---|
1513 | | - pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); |
---|
1514 | | - |
---|
1515 | | - ret = pci2cio_wait_completion(icm, 5000); |
---|
1516 | | - if (ret) |
---|
1517 | | - return ret; |
---|
1518 | | - |
---|
1519 | | - pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data); |
---|
1520 | | - return 0; |
---|
1521 | | -} |
---|
1522 | | - |
---|
1523 | | -static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, |
---|
1524 | | - unsigned int port, unsigned int index, u32 data) |
---|
1525 | | -{ |
---|
1526 | | - struct pci_dev *pdev = icm->upstream_port; |
---|
1527 | | - int vnd_cap = icm->vnd_cap; |
---|
1528 | | - u32 cmd; |
---|
1529 | | - |
---|
1530 | | - pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data); |
---|
1531 | | - |
---|
1532 | | - cmd = index; |
---|
1533 | | - cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; |
---|
1534 | | - cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; |
---|
1535 | | - cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START; |
---|
1536 | | - pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); |
---|
1537 | | - |
---|
1538 | | - return pci2cio_wait_completion(icm, 5000); |
---|
1539 | | -} |
---|
1540 | | - |
---|
1541 | 1737 | static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi) |
---|
1542 | 1738 | { |
---|
1543 | 1739 | struct icm *icm = tb_priv(tb); |
---|
.. | .. |
---|
1558 | 1754 | iowrite32(val, nhi->iobase + REG_FW_STS); |
---|
1559 | 1755 | |
---|
1560 | 1756 | /* Trigger CIO reset now */ |
---|
1561 | | - return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9)); |
---|
| 1757 | + return icm->cio_reset(tb); |
---|
1562 | 1758 | } |
---|
1563 | 1759 | |
---|
1564 | 1760 | static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi) |
---|
.. | .. |
---|
1568 | 1764 | u32 val; |
---|
1569 | 1765 | |
---|
1570 | 1766 | /* Check if the ICM firmware is already running */ |
---|
1571 | | - val = ioread32(nhi->iobase + REG_FW_STS); |
---|
1572 | | - if (val & REG_FW_STS_ICM_EN) |
---|
| 1767 | + if (icm_firmware_running(nhi)) |
---|
1573 | 1768 | return 0; |
---|
1574 | 1769 | |
---|
1575 | | - dev_info(&nhi->pdev->dev, "starting ICM firmware\n"); |
---|
| 1770 | + dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n"); |
---|
1576 | 1771 | |
---|
1577 | 1772 | ret = icm_firmware_reset(tb, nhi); |
---|
1578 | 1773 | if (ret) |
---|
.. | .. |
---|
1757 | 1952 | */ |
---|
1758 | 1953 | static void icm_unplug_children(struct tb_switch *sw) |
---|
1759 | 1954 | { |
---|
1760 | | - unsigned int i; |
---|
| 1955 | + struct tb_port *port; |
---|
1761 | 1956 | |
---|
1762 | 1957 | if (tb_route(sw)) |
---|
1763 | 1958 | sw->is_unplugged = true; |
---|
1764 | 1959 | |
---|
1765 | | - for (i = 1; i <= sw->config.max_port_number; i++) { |
---|
1766 | | - struct tb_port *port = &sw->ports[i]; |
---|
1767 | | - |
---|
1768 | | - if (tb_is_upstream_port(port)) |
---|
1769 | | - continue; |
---|
1770 | | - if (port->xdomain) { |
---|
| 1960 | + tb_switch_for_each_port(sw, port) { |
---|
| 1961 | + if (port->xdomain) |
---|
1771 | 1962 | port->xdomain->is_unplugged = true; |
---|
1772 | | - continue; |
---|
1773 | | - } |
---|
1774 | | - if (!port->remote) |
---|
1775 | | - continue; |
---|
1776 | | - |
---|
1777 | | - icm_unplug_children(port->remote->sw); |
---|
| 1963 | + else if (tb_port_has_remote(port)) |
---|
| 1964 | + icm_unplug_children(port->remote->sw); |
---|
1778 | 1965 | } |
---|
| 1966 | +} |
---|
| 1967 | + |
---|
| 1968 | +static int complete_rpm(struct device *dev, void *data) |
---|
| 1969 | +{ |
---|
| 1970 | + struct tb_switch *sw = tb_to_switch(dev); |
---|
| 1971 | + |
---|
| 1972 | + if (sw) |
---|
| 1973 | + complete(&sw->rpm_complete); |
---|
| 1974 | + return 0; |
---|
| 1975 | +} |
---|
| 1976 | + |
---|
| 1977 | +static void remove_unplugged_switch(struct tb_switch *sw) |
---|
| 1978 | +{ |
---|
| 1979 | + struct device *parent = get_device(sw->dev.parent); |
---|
| 1980 | + |
---|
| 1981 | + pm_runtime_get_sync(parent); |
---|
| 1982 | + |
---|
| 1983 | + /* |
---|
| 1984 | + * Signal this and switches below for rpm_complete because |
---|
| 1985 | + * tb_switch_remove() calls pm_runtime_get_sync() that then waits |
---|
| 1986 | + * for it. |
---|
| 1987 | + */ |
---|
| 1988 | + complete_rpm(&sw->dev, NULL); |
---|
| 1989 | + bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm); |
---|
| 1990 | + tb_switch_remove(sw); |
---|
| 1991 | + |
---|
| 1992 | + pm_runtime_mark_last_busy(parent); |
---|
| 1993 | + pm_runtime_put_autosuspend(parent); |
---|
| 1994 | + |
---|
| 1995 | + put_device(parent); |
---|
1779 | 1996 | } |
---|
1780 | 1997 | |
---|
1781 | 1998 | static void icm_free_unplugged_children(struct tb_switch *sw) |
---|
1782 | 1999 | { |
---|
1783 | | - unsigned int i; |
---|
| 2000 | + struct tb_port *port; |
---|
1784 | 2001 | |
---|
1785 | | - for (i = 1; i <= sw->config.max_port_number; i++) { |
---|
1786 | | - struct tb_port *port = &sw->ports[i]; |
---|
1787 | | - |
---|
1788 | | - if (tb_is_upstream_port(port)) |
---|
1789 | | - continue; |
---|
1790 | | - |
---|
| 2002 | + tb_switch_for_each_port(sw, port) { |
---|
1791 | 2003 | if (port->xdomain && port->xdomain->is_unplugged) { |
---|
1792 | 2004 | tb_xdomain_remove(port->xdomain); |
---|
1793 | 2005 | port->xdomain = NULL; |
---|
1794 | | - continue; |
---|
1795 | | - } |
---|
1796 | | - |
---|
1797 | | - if (!port->remote) |
---|
1798 | | - continue; |
---|
1799 | | - |
---|
1800 | | - if (port->remote->sw->is_unplugged) { |
---|
1801 | | - tb_switch_remove(port->remote->sw); |
---|
1802 | | - port->remote = NULL; |
---|
1803 | | - } else { |
---|
1804 | | - icm_free_unplugged_children(port->remote->sw); |
---|
| 2006 | + } else if (tb_port_has_remote(port)) { |
---|
| 2007 | + if (port->remote->sw->is_unplugged) { |
---|
| 2008 | + remove_unplugged_switch(port->remote->sw); |
---|
| 2009 | + port->remote = NULL; |
---|
| 2010 | + } else { |
---|
| 2011 | + icm_free_unplugged_children(port->remote->sw); |
---|
| 2012 | + } |
---|
1805 | 2013 | } |
---|
1806 | 2014 | } |
---|
1807 | 2015 | } |
---|
.. | .. |
---|
1824 | 2032 | if (tb->nhi->going_away) |
---|
1825 | 2033 | return; |
---|
1826 | 2034 | |
---|
| 2035 | + /* |
---|
| 2036 | + * If RTD3 was vetoed before we entered system suspend allow it |
---|
| 2037 | + * again now before driver ready is sent. Firmware sends a new RTD3 |
---|
| 2038 | + * veto if it is still the case after we have sent it driver ready |
---|
| 2039 | + * command. |
---|
| 2040 | + */ |
---|
| 2041 | + icm_veto_end(tb); |
---|
1827 | 2042 | icm_unplug_children(tb->root_switch); |
---|
1828 | 2043 | |
---|
1829 | 2044 | /* |
---|
.. | .. |
---|
1846 | 2061 | return 0; |
---|
1847 | 2062 | } |
---|
1848 | 2063 | |
---|
| 2064 | +static int icm_runtime_suspend_switch(struct tb_switch *sw) |
---|
| 2065 | +{ |
---|
| 2066 | + if (tb_route(sw)) |
---|
| 2067 | + reinit_completion(&sw->rpm_complete); |
---|
| 2068 | + return 0; |
---|
| 2069 | +} |
---|
| 2070 | + |
---|
| 2071 | +static int icm_runtime_resume_switch(struct tb_switch *sw) |
---|
| 2072 | +{ |
---|
| 2073 | + if (tb_route(sw)) { |
---|
| 2074 | + if (!wait_for_completion_timeout(&sw->rpm_complete, |
---|
| 2075 | + msecs_to_jiffies(500))) { |
---|
| 2076 | + dev_dbg(&sw->dev, "runtime resuming timed out\n"); |
---|
| 2077 | + } |
---|
| 2078 | + } |
---|
| 2079 | + return 0; |
---|
| 2080 | +} |
---|
| 2081 | + |
---|
1849 | 2082 | static int icm_runtime_resume(struct tb *tb) |
---|
1850 | 2083 | { |
---|
1851 | 2084 | /* |
---|
.. | .. |
---|
1865 | 2098 | tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0); |
---|
1866 | 2099 | else |
---|
1867 | 2100 | tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); |
---|
1868 | | - if (!tb->root_switch) |
---|
1869 | | - return -ENODEV; |
---|
| 2101 | + if (IS_ERR(tb->root_switch)) |
---|
| 2102 | + return PTR_ERR(tb->root_switch); |
---|
1870 | 2103 | |
---|
1871 | | - /* |
---|
1872 | | - * NVM upgrade has not been tested on Apple systems and they |
---|
1873 | | - * don't provide images publicly either. To be on the safe side |
---|
1874 | | - * prevent root switch NVM upgrade on Macs for now. |
---|
1875 | | - */ |
---|
1876 | | - tb->root_switch->no_nvm_upgrade = x86_apple_machine; |
---|
| 2104 | + tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm; |
---|
1877 | 2105 | tb->root_switch->rpm = icm->rpm; |
---|
| 2106 | + |
---|
| 2107 | + if (icm->set_uuid) |
---|
| 2108 | + icm->set_uuid(tb); |
---|
1878 | 2109 | |
---|
1879 | 2110 | ret = tb_switch_add(tb->root_switch); |
---|
1880 | 2111 | if (ret) { |
---|
.. | .. |
---|
1925 | 2156 | .complete = icm_complete, |
---|
1926 | 2157 | .runtime_suspend = icm_runtime_suspend, |
---|
1927 | 2158 | .runtime_resume = icm_runtime_resume, |
---|
| 2159 | + .runtime_suspend_switch = icm_runtime_suspend_switch, |
---|
| 2160 | + .runtime_resume_switch = icm_runtime_resume_switch, |
---|
1928 | 2161 | .handle_event = icm_handle_event, |
---|
1929 | 2162 | .get_boot_acl = icm_ar_get_boot_acl, |
---|
1930 | 2163 | .set_boot_acl = icm_ar_set_boot_acl, |
---|
.. | .. |
---|
1945 | 2178 | .complete = icm_complete, |
---|
1946 | 2179 | .runtime_suspend = icm_runtime_suspend, |
---|
1947 | 2180 | .runtime_resume = icm_runtime_resume, |
---|
| 2181 | + .runtime_suspend_switch = icm_runtime_suspend_switch, |
---|
| 2182 | + .runtime_resume_switch = icm_runtime_resume_switch, |
---|
1948 | 2183 | .handle_event = icm_handle_event, |
---|
1949 | 2184 | .get_boot_acl = icm_ar_get_boot_acl, |
---|
1950 | 2185 | .set_boot_acl = icm_ar_set_boot_acl, |
---|
.. | .. |
---|
1952 | 2187 | .add_switch_key = icm_tr_add_switch_key, |
---|
1953 | 2188 | .challenge_switch_key = icm_tr_challenge_switch_key, |
---|
1954 | 2189 | .disconnect_pcie_paths = icm_disconnect_pcie_paths, |
---|
| 2190 | + .approve_xdomain_paths = icm_tr_approve_xdomain_paths, |
---|
| 2191 | + .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, |
---|
| 2192 | +}; |
---|
| 2193 | + |
---|
| 2194 | +/* Ice Lake */ |
---|
| 2195 | +static const struct tb_cm_ops icm_icl_ops = { |
---|
| 2196 | + .driver_ready = icm_driver_ready, |
---|
| 2197 | + .start = icm_start, |
---|
| 2198 | + .stop = icm_stop, |
---|
| 2199 | + .complete = icm_complete, |
---|
| 2200 | + .runtime_suspend = icm_runtime_suspend, |
---|
| 2201 | + .runtime_resume = icm_runtime_resume, |
---|
| 2202 | + .handle_event = icm_handle_event, |
---|
1955 | 2203 | .approve_xdomain_paths = icm_tr_approve_xdomain_paths, |
---|
1956 | 2204 | .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, |
---|
1957 | 2205 | }; |
---|
.. | .. |
---|
1972 | 2220 | switch (nhi->pdev->device) { |
---|
1973 | 2221 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: |
---|
1974 | 2222 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: |
---|
| 2223 | + icm->can_upgrade_nvm = true; |
---|
1975 | 2224 | icm->is_supported = icm_fr_is_supported; |
---|
1976 | 2225 | icm->get_route = icm_fr_get_route; |
---|
1977 | 2226 | icm->save_devices = icm_fr_save_devices; |
---|
.. | .. |
---|
1989 | 2238 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI: |
---|
1990 | 2239 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI: |
---|
1991 | 2240 | icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; |
---|
| 2241 | + /* |
---|
| 2242 | + * NVM upgrade has not been tested on Apple systems and |
---|
| 2243 | + * they don't provide images publicly either. To be on |
---|
| 2244 | + * the safe side prevent root switch NVM upgrade on Macs |
---|
| 2245 | + * for now. |
---|
| 2246 | + */ |
---|
| 2247 | + icm->can_upgrade_nvm = !x86_apple_machine; |
---|
1992 | 2248 | icm->is_supported = icm_ar_is_supported; |
---|
| 2249 | + icm->cio_reset = icm_ar_cio_reset; |
---|
1993 | 2250 | icm->get_mode = icm_ar_get_mode; |
---|
1994 | 2251 | icm->get_route = icm_ar_get_route; |
---|
1995 | 2252 | icm->save_devices = icm_fr_save_devices; |
---|
.. | .. |
---|
2004 | 2261 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI: |
---|
2005 | 2262 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI: |
---|
2006 | 2263 | icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; |
---|
| 2264 | + icm->can_upgrade_nvm = !x86_apple_machine; |
---|
2007 | 2265 | icm->is_supported = icm_ar_is_supported; |
---|
| 2266 | + icm->cio_reset = icm_tr_cio_reset; |
---|
| 2267 | + icm->get_mode = icm_ar_get_mode; |
---|
| 2268 | + icm->driver_ready = icm_tr_driver_ready; |
---|
| 2269 | + icm->device_connected = icm_tr_device_connected; |
---|
| 2270 | + icm->device_disconnected = icm_tr_device_disconnected; |
---|
| 2271 | + icm->xdomain_connected = icm_tr_xdomain_connected; |
---|
| 2272 | + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; |
---|
| 2273 | + tb->cm_ops = &icm_tr_ops; |
---|
| 2274 | + break; |
---|
| 2275 | + |
---|
| 2276 | + case PCI_DEVICE_ID_INTEL_ICL_NHI0: |
---|
| 2277 | + case PCI_DEVICE_ID_INTEL_ICL_NHI1: |
---|
| 2278 | + icm->is_supported = icm_fr_is_supported; |
---|
| 2279 | + icm->driver_ready = icm_icl_driver_ready; |
---|
| 2280 | + icm->set_uuid = icm_icl_set_uuid; |
---|
| 2281 | + icm->device_connected = icm_icl_device_connected; |
---|
| 2282 | + icm->device_disconnected = icm_tr_device_disconnected; |
---|
| 2283 | + icm->xdomain_connected = icm_tr_xdomain_connected; |
---|
| 2284 | + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; |
---|
| 2285 | + icm->rtd3_veto = icm_icl_rtd3_veto; |
---|
| 2286 | + tb->cm_ops = &icm_icl_ops; |
---|
| 2287 | + break; |
---|
| 2288 | + |
---|
| 2289 | + case PCI_DEVICE_ID_INTEL_TGL_NHI0: |
---|
| 2290 | + case PCI_DEVICE_ID_INTEL_TGL_NHI1: |
---|
| 2291 | + case PCI_DEVICE_ID_INTEL_TGL_H_NHI0: |
---|
| 2292 | + case PCI_DEVICE_ID_INTEL_TGL_H_NHI1: |
---|
| 2293 | + icm->is_supported = icm_tgl_is_supported; |
---|
| 2294 | + icm->driver_ready = icm_icl_driver_ready; |
---|
| 2295 | + icm->set_uuid = icm_icl_set_uuid; |
---|
| 2296 | + icm->device_connected = icm_icl_device_connected; |
---|
| 2297 | + icm->device_disconnected = icm_tr_device_disconnected; |
---|
| 2298 | + icm->xdomain_connected = icm_tr_xdomain_connected; |
---|
| 2299 | + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; |
---|
| 2300 | + icm->rtd3_veto = icm_icl_rtd3_veto; |
---|
| 2301 | + tb->cm_ops = &icm_icl_ops; |
---|
| 2302 | + break; |
---|
| 2303 | + |
---|
| 2304 | + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI: |
---|
| 2305 | + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI: |
---|
| 2306 | + icm->is_supported = icm_tgl_is_supported; |
---|
2008 | 2307 | icm->get_mode = icm_ar_get_mode; |
---|
2009 | 2308 | icm->driver_ready = icm_tr_driver_ready; |
---|
2010 | 2309 | icm->device_connected = icm_tr_device_connected; |
---|