 // SPDX-License-Identifier: GPL-2.0
 /*
- * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
+ * Thunderbolt driver - bus logic (NHI independent)
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
  */

 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
-#include <linux/platform_data/x86/apple.h>
+#include <linux/pm_runtime.h>

 #include "tb.h"
 #include "tb_regs.h"
-#include "tunnel_pci.h"
+#include "tunnel.h"

 /**
  * struct tb_cm - Simple Thunderbolt connection manager
  * @tunnel_list: List of active tunnels
+ * @dp_resources: List of available DP resources for DP tunneling
  * @hotplug_active: tb_handle_hotplug will stop progressing plug
  *		    events and exit if this is not set (it needs to
  *		    acquire the lock one more time). Used to drain wq
  *		    after cfg has been paused.
+ * @remove_work: Work used to remove any unplugged routers after
+ *		 runtime resume
  */
 struct tb_cm {
 	struct list_head tunnel_list;
+	struct list_head dp_resources;
 	bool hotplug_active;
+	struct delayed_work remove_work;
 };
+
+static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
+{
+	return ((void *)tcm - sizeof(struct tb));
+}
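
A note on the pointer arithmetic above: tb_priv() hands out memory that the domain allocator places immediately after the enclosing struct tb, so stepping back by sizeof(struct tb) recovers the domain pointer. A minimal sketch of the assumed layout (the allocator itself is not part of this patch):

```c
/*
 * Assumed layout (sketch):
 *
 *   base                      -> struct tb
 *   base + sizeof(struct tb)  -> connection manager private data
 *                                (struct tb_cm, what tb_priv() returns)
 *
 * Hence tcm_to_tb(tb_priv(tb)) == tb.
 */
```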
+
+struct tb_hotplug_event {
+	struct work_struct work;
+	struct tb *tb;
+	u64 route;
+	u8 port;
+	bool unplug;
+};
+
+static void tb_handle_hotplug(struct work_struct *work);
+
+static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
+{
+	struct tb_hotplug_event *ev;
+
+	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+	if (!ev)
+		return;
+
+	ev->tb = tb;
+	ev->route = route;
+	ev->port = port;
+	ev->unplug = unplug;
+	INIT_WORK(&ev->work, tb_handle_hotplug);
+	queue_work(tb->wq, &ev->work);
+}
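
tb_queue_hotplug() transfers ownership of the allocation to the work item; the handler frees it when done (tb_handle_hotplug() later in this file ends with kfree(ev)). A sketch of the consumer side of that contract:

```c
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);

	/* ... process ev->route, ev->port and ev->unplug ... */

	kfree(ev);	/* the work item owns the event */
}
```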

 /* enumeration & hot plug handling */

+static void tb_add_dp_resources(struct tb_switch *sw)
+{
+	struct tb_cm *tcm = tb_priv(sw->tb);
+	struct tb_port *port;
+
+	tb_switch_for_each_port(sw, port) {
+		if (!tb_port_is_dpin(port))
+			continue;
+
+		if (!tb_switch_query_dp_resource(sw, port))
+			continue;
+
+		list_add_tail(&port->list, &tcm->dp_resources);
+		tb_port_dbg(port, "DP IN resource available\n");
+	}
+}
+
+static void tb_remove_dp_resources(struct tb_switch *sw)
+{
+	struct tb_cm *tcm = tb_priv(sw->tb);
+	struct tb_port *port, *tmp;
+
+	/* Clear children resources first */
+	tb_switch_for_each_port(sw, port) {
+		if (tb_port_has_remote(port))
+			tb_remove_dp_resources(port->remote->sw);
+	}
+
+	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
+		if (port->sw == sw) {
| 99 | + tb_port_dbg(port, "DP OUT resource unavailable\n"); |
---|
+			list_del_init(&port->list);
+		}
+	}
+}
+
+static void tb_discover_tunnels(struct tb_switch *sw)
+{
+	struct tb *tb = sw->tb;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_port *port;
+
+	tb_switch_for_each_port(sw, port) {
+		struct tb_tunnel *tunnel = NULL;
+
+		switch (port->config.type) {
+		case TB_TYPE_DP_HDMI_IN:
+			tunnel = tb_tunnel_discover_dp(tb, port);
+			break;
+
+		case TB_TYPE_PCIE_DOWN:
+			tunnel = tb_tunnel_discover_pci(tb, port);
+			break;
+
+		case TB_TYPE_USB3_DOWN:
+			tunnel = tb_tunnel_discover_usb3(tb, port);
+			break;
+
+		default:
+			break;
+		}
+
+		if (!tunnel)
+			continue;
+
+		if (tb_tunnel_is_pci(tunnel)) {
+			struct tb_switch *parent = tunnel->dst_port->sw;
+
+			while (parent != tunnel->src_port->sw) {
+				parent->boot = true;
+				parent = tb_switch_parent(parent);
+			}
+		} else if (tb_tunnel_is_dp(tunnel)) {
+			/* Keep the domain from powering down */
+			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
+			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
+		}
+
+		list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	}
+
+	tb_switch_for_each_port(sw, port) {
+		if (tb_port_has_remote(port))
+			tb_discover_tunnels(port->remote->sw);
+	}
+}
+
+static int tb_port_configure_xdomain(struct tb_port *port)
+{
+	/*
+	 * XDomain paths currently only support single lane so we must
+	 * disable the other lane according to USB4 spec.
+	 */
+	tb_port_disable(port->dual_link_port);
+
+	if (tb_switch_is_usb4(port->sw))
+		return usb4_port_configure_xdomain(port);
+	return tb_lc_configure_xdomain(port);
+}
+
+static void tb_port_unconfigure_xdomain(struct tb_port *port)
+{
+	if (tb_switch_is_usb4(port->sw))
+		usb4_port_unconfigure_xdomain(port);
+	else
+		tb_lc_unconfigure_xdomain(port);
+
+	tb_port_enable(port->dual_link_port);
+}
+
+static void tb_scan_xdomain(struct tb_port *port)
+{
+	struct tb_switch *sw = port->sw;
+	struct tb *tb = sw->tb;
+	struct tb_xdomain *xd;
+	u64 route;
+
+	route = tb_downstream_route(port);
+	xd = tb_xdomain_find_by_route(tb, route);
+	if (xd) {
+		tb_xdomain_put(xd);
+		return;
+	}
+
+	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
+			      NULL);
+	if (xd) {
+		tb_port_at(route, sw)->xdomain = xd;
+		tb_port_configure_xdomain(port);
+		tb_xdomain_add(xd);
+	}
+}
+
+static int tb_enable_tmu(struct tb_switch *sw)
+{
+	int ret;
+
+	/* If it is already enabled in correct mode, don't touch it */
+	if (tb_switch_tmu_is_enabled(sw))
+		return 0;
+
+	ret = tb_switch_tmu_disable(sw);
+	if (ret)
+		return ret;
+
+	ret = tb_switch_tmu_post_time(sw);
+	if (ret)
+		return ret;
+
+	return tb_switch_tmu_enable(sw);
+}
+
+/**
+ * tb_find_unused_port() - return the first inactive port on @sw
+ * @sw: Switch to find the port on
+ * @type: Port type to look for
+ */
+static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
+					   enum tb_port_type type)
+{
+	struct tb_port *port;
+
+	tb_switch_for_each_port(sw, port) {
+		if (tb_is_upstream_port(port))
+			continue;
+		if (port->config.type != type)
+			continue;
+		if (!port->cap_adap)
+			continue;
+		if (tb_port_is_enabled(port))
+			continue;
+		return port;
+	}
+	return NULL;
+}
+
+static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
+					 const struct tb_port *port)
+{
+	struct tb_port *down;
+
+	down = usb4_switch_map_usb3_down(sw, port);
+	if (down && !tb_usb3_port_is_enabled(down))
+		return down;
+	return NULL;
+}
+
+static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
+					struct tb_port *src_port,
+					struct tb_port *dst_port)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+
+	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+		if (tunnel->type == type &&
+		    ((src_port && src_port == tunnel->src_port) ||
+		     (dst_port && dst_port == tunnel->dst_port))) {
+			return tunnel;
+		}
+	}
+
+	return NULL;
+}
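
Either endpoint passed to tb_find_tunnel() may be NULL, in which case only the other endpoint needs to match. Later hunks rely on this wildcard behaviour, for example:

```c
/* Match a USB3 tunnel by its source adapter only */
tunnel = tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);

/* Match a DMA tunnel by its destination adapter only */
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
```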
+
+static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
+						   struct tb_port *src_port,
+						   struct tb_port *dst_port)
+{
+	struct tb_port *port, *usb3_down;
+	struct tb_switch *sw;
+
+	/* Pick the router that is deepest in the topology */
+	if (dst_port->sw->config.depth > src_port->sw->config.depth)
+		sw = dst_port->sw;
+	else
+		sw = src_port->sw;
+
+	/* Can't be the host router */
+	if (sw == tb->root_switch)
+		return NULL;
+
+	/* Find the downstream USB4 port that leads to this router */
+	port = tb_port_at(tb_route(sw), tb->root_switch);
+	/* Find the corresponding host router USB3 downstream port */
+	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
+	if (!usb3_down)
+		return NULL;
+
+	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
+}
+
+static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
+	struct tb_port *dst_port, int *available_up, int *available_down)
+{
+	int usb3_consumed_up, usb3_consumed_down, ret;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+	struct tb_port *port;
+
+	tb_port_dbg(dst_port, "calculating available bandwidth\n");
+
+	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+	if (tunnel) {
+		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
+						   &usb3_consumed_down);
+		if (ret)
+			return ret;
+	} else {
+		usb3_consumed_up = 0;
+		usb3_consumed_down = 0;
+	}
+
+	*available_up = *available_down = 40000;
+
+	/* Find the minimum available bandwidth over all links */
+	tb_for_each_port_on_path(src_port, dst_port, port) {
+		int link_speed, link_width, up_bw, down_bw;
+
+		if (!tb_port_is_null(port))
+			continue;
+
+		if (tb_is_upstream_port(port)) {
+			link_speed = port->sw->link_speed;
+		} else {
+			link_speed = tb_port_get_link_speed(port);
+			if (link_speed < 0)
+				return link_speed;
+		}
+
+		link_width = port->bonded ? 2 : 1;
+
+		up_bw = link_speed * link_width * 1000; /* Mb/s */
+		/* Leave 10% guard band */
+		up_bw -= up_bw / 10;
+		down_bw = up_bw;
+
+		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
+
+		/*
+		 * Find all DP tunnels that cross the port and reduce
+		 * their consumed bandwidth from the available.
+		 */
+		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+			int dp_consumed_up, dp_consumed_down;
+
+			if (!tb_tunnel_is_dp(tunnel))
+				continue;
+
+			if (!tb_tunnel_port_on_path(tunnel, port))
+				continue;
+
+			ret = tb_tunnel_consumed_bandwidth(tunnel,
+							   &dp_consumed_up,
+							   &dp_consumed_down);
+			if (ret)
+				return ret;
+
+			up_bw -= dp_consumed_up;
+			down_bw -= dp_consumed_down;
+		}
+
+		/*
+		 * If USB3 is tunneled from the host router down to the
+		 * branch leading to the port, we need to take the USB3
+		 * consumed bandwidth into account regardless of whether
+		 * it actually crosses the port.
+		 */
+		up_bw -= usb3_consumed_up;
+		down_bw -= usb3_consumed_down;
+
+		if (up_bw < *available_up)
+			*available_up = up_bw;
+		if (down_bw < *available_down)
+			*available_down = down_bw;
+	}
+
+	if (*available_up < 0)
+		*available_up = 0;
+	if (*available_down < 0)
+		*available_down = 0;
+
+	return 0;
+}
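
The per-link arithmetic is easy to check by hand. For a bonded 20 Gb/s link (the case that also matches the 40000 Mb/s starting cap above), the numbers work out as follows:

```c
link_speed = 20;		/* Gb/s per lane */
link_width = 2;			/* bonded, two lanes */
up_bw = 20 * 2 * 1000;		/* 40000 Mb/s total */
up_bw -= up_bw / 10;		/* minus 10% guard band */
/* up_bw == down_bw == 36000 Mb/s before tunnel deductions */
```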
+
+static int tb_release_unused_usb3_bandwidth(struct tb *tb,
+					    struct tb_port *src_port,
+					    struct tb_port *dst_port)
+{
+	struct tb_tunnel *tunnel;
+
+	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
+}
+
+static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
+				      struct tb_port *dst_port)
+{
+	int ret, available_up, available_down;
+	struct tb_tunnel *tunnel;
+
+	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+	if (!tunnel)
+		return;
+
+	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
+
+	/*
+	 * Calculate available bandwidth for the first hop USB3 tunnel.
+	 * That determines the whole USB3 bandwidth for this branch.
+	 */
+	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
+				     &available_up, &available_down);
+	if (ret) {
+		tb_warn(tb, "failed to calculate available bandwidth\n");
+		return;
+	}
+
+	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
+	       available_up, available_down);
+
+	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
+					      &available_down);
+}
+
+static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
+{
+	struct tb_switch *parent = tb_switch_parent(sw);
+	int ret, available_up, available_down;
+	struct tb_port *up, *down, *port;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+
+	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
+	if (!up)
+		return 0;
+
+	if (!sw->link_usb4)
+		return 0;
+
+	/*
+	 * Look up available down port. Since we are chaining it should
+	 * be found right above this switch.
+	 */
+	port = tb_port_at(tb_route(sw), parent);
+	down = tb_find_usb3_down(parent, port);
+	if (!down)
+		return 0;
+
+	if (tb_route(parent)) {
+		struct tb_port *parent_up;
+		/*
+		 * Check first that the parent switch has its upstream USB3
+		 * port enabled. Otherwise the chain is not complete and
+		 * there is no point setting up a new tunnel.
+		 */
+		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
+		if (!parent_up || !tb_port_is_enabled(parent_up))
+			return 0;
+
+		/* Make all unused bandwidth available for the new tunnel */
+		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
+		if (ret)
+			return ret;
+	}
+
+	ret = tb_available_bandwidth(tb, down, up, &available_up,
+				     &available_down);
+	if (ret)
+		goto err_reclaim;
+
+	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
+		    available_up, available_down);
+
+	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
+				      available_down);
+	if (!tunnel) {
+		ret = -ENOMEM;
+		goto err_reclaim;
+	}
+
+	if (tb_tunnel_activate(tunnel)) {
+		tb_port_info(up,
+			     "USB3 tunnel activation failed, aborting\n");
+		ret = -EIO;
+		goto err_free;
+	}
+
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	if (tb_route(parent))
+		tb_reclaim_usb3_bandwidth(tb, down, up);
+
+	return 0;
+
+err_free:
+	tb_tunnel_free(tunnel);
+err_reclaim:
+	if (tb_route(parent))
+		tb_reclaim_usb3_bandwidth(tb, down, up);
+
+	return ret;
+}
+
+static int tb_create_usb3_tunnels(struct tb_switch *sw)
+{
+	struct tb_port *port;
+	int ret;
+
+	if (tb_route(sw)) {
+		ret = tb_tunnel_usb3(sw->tb, sw);
+		if (ret)
+			return ret;
+	}
+
+	tb_switch_for_each_port(sw, port) {
+		if (!tb_port_has_remote(port))
+			continue;
+		ret = tb_create_usb3_tunnels(port->remote->sw);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}

 static void tb_scan_port(struct tb_port *port);

[...]
  */
 static void tb_scan_switch(struct tb_switch *sw)
 {
-	int i;
-	for (i = 1; i <= sw->config.max_port_number; i++)
-		tb_scan_port(&sw->ports[i]);
+	struct tb_port *port;
+
+	pm_runtime_get_sync(&sw->dev);
+
+	tb_switch_for_each_port(sw, port)
+		tb_scan_port(port);
+
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
 }

 /**
[...]
  */
 static void tb_scan_port(struct tb_port *port)
 {
+	struct tb_cm *tcm = tb_priv(port->sw->tb);
+	struct tb_port *upstream_port;
 	struct tb_switch *sw;
+
 	if (tb_is_upstream_port(port))
 		return;
+
+	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
+	    !tb_dp_port_is_enabled(port)) {
+		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
+		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
+				 false);
+		return;
+	}
+
 	if (port->config.type != TB_TYPE_PORT)
 		return;
 	if (port->dual_link_port && port->link_nr)
[...]
 	if (tb_wait_for_port(port, false) <= 0)
 		return;
 	if (port->remote) {
-		tb_port_WARN(port, "port already has a remote!\n");
+		tb_port_dbg(port, "port already has a remote\n");
 		return;
 	}
+
+	tb_retimer_scan(port);
+
 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
 			     tb_downstream_route(port));
-	if (!sw)
+	if (IS_ERR(sw)) {
+		/*
+		 * If there is an error accessing the connected switch
+		 * it may be connected to another domain. Also we allow
+		 * the other domain to be connected to a max depth switch.
+		 */
+		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
+			tb_scan_xdomain(port);
 		return;
+	}

 	if (tb_switch_configure(sw)) {
 		tb_switch_put(sw);
 		return;
 	}

-	sw->authorized = true;
+	/*
+	 * If there was previously another domain connected remove it
+	 * first.
+	 */
+	if (port->xdomain) {
+		tb_xdomain_remove(port->xdomain);
+		tb_port_unconfigure_xdomain(port);
+		port->xdomain = NULL;
+	}
+
+	/*
+	 * Do not send uevents until we have discovered all existing
+	 * tunnels and know which switches were authorized already by
+	 * the boot firmware.
+	 */
+	if (!tcm->hotplug_active)
+		dev_set_uevent_suppress(&sw->dev, true);
+
+	/*
+	 * At the moment runtime PM is only supported for Thunderbolt 2
+	 * and beyond (devices with an LC).
+	 */
+	sw->rpm = sw->generation > 1;

 	if (tb_switch_add(sw)) {
 		tb_switch_put(sw);
 		return;
 	}

-	port->remote = tb_upstream_port(sw);
-	tb_upstream_port(sw)->remote = port;
+	/* Link the switches using both links if available */
+	upstream_port = tb_upstream_port(sw);
+	port->remote = upstream_port;
+	upstream_port->remote = port;
+	if (port->dual_link_port && upstream_port->dual_link_port) {
+		port->dual_link_port->remote = upstream_port->dual_link_port;
+		upstream_port->dual_link_port->remote = port->dual_link_port;
+	}
+
+	/* Enable lane bonding if supported */
+	tb_switch_lane_bonding_enable(sw);
+	/* Set the link configured */
+	tb_switch_configure_link(sw);
+
+	if (tb_enable_tmu(sw))
+		tb_sw_warn(sw, "failed to enable TMU\n");
+
+	/* Scan upstream retimers */
+	tb_retimer_scan(upstream_port);
+
+	/*
+	 * Create USB 3.x tunnels only when the switch is plugged to the
+	 * domain. This is because we scan the domain also during discovery
+	 * and want to discover existing USB 3.x tunnels before we create
+	 * any new ones.
+	 */
+	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
+		tb_sw_warn(sw, "USB3 tunnel creation failed\n");
+
+	tb_add_dp_resources(sw);
 	tb_scan_switch(sw);
+}
+
+static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
+{
+	struct tb_port *src_port, *dst_port;
+	struct tb *tb;
+
+	if (!tunnel)
+		return;
+
+	tb_tunnel_deactivate(tunnel);
+	list_del(&tunnel->list);
+
+	tb = tunnel->tb;
+	src_port = tunnel->src_port;
+	dst_port = tunnel->dst_port;
+
+	switch (tunnel->type) {
+	case TB_TUNNEL_DP:
+		/*
+		 * In case of DP tunnel make sure the DP IN resource is
+		 * deallocated properly.
+		 */
+		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
+		/* Now we can allow the domain to runtime suspend again */
+		pm_runtime_mark_last_busy(&dst_port->sw->dev);
+		pm_runtime_put_autosuspend(&dst_port->sw->dev);
+		pm_runtime_mark_last_busy(&src_port->sw->dev);
+		pm_runtime_put_autosuspend(&src_port->sw->dev);
+		fallthrough;
+
+	case TB_TUNNEL_USB3:
+		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
+		break;
+
+	default:
+		/*
+		 * PCIe and DMA tunnels do not consume guaranteed
+		 * bandwidth.
+		 */
+		break;
+	}
+
+	tb_tunnel_free(tunnel);
 }

 /**
[...]
 static void tb_free_invalid_tunnels(struct tb *tb)
 {
 	struct tb_cm *tcm = tb_priv(tb);
-	struct tb_pci_tunnel *tunnel;
-	struct tb_pci_tunnel *n;
+	struct tb_tunnel *tunnel;
+	struct tb_tunnel *n;

 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
-		if (tb_pci_is_invalid(tunnel)) {
-			tb_pci_deactivate(tunnel);
-			list_del(&tunnel->list);
-			tb_pci_free(tunnel);
-		}
+		if (tb_tunnel_is_invalid(tunnel))
+			tb_deactivate_and_free_tunnel(tunnel);
 	}
 }

[...]
  */
 static void tb_free_unplugged_children(struct tb_switch *sw)
 {
-	int i;
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		struct tb_port *port = &sw->ports[i];
-		if (tb_is_upstream_port(port))
+	struct tb_port *port;
+
+	tb_switch_for_each_port(sw, port) {
+		if (!tb_port_has_remote(port))
 			continue;
-		if (!port->remote)
-			continue;
+
 		if (port->remote->sw->is_unplugged) {
+			tb_retimer_remove_all(port);
+			tb_remove_dp_resources(port->remote->sw);
+			tb_switch_unconfigure_link(port->remote->sw);
+			tb_switch_lane_bonding_disable(port->remote->sw);
 			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
+			if (port->dual_link_port)
+				port->dual_link_port->remote = NULL;
 		} else {
 			tb_free_unplugged_children(port->remote->sw);
 		}
 	}
 }

-
-/**
- * find_pci_up_port() - return the first PCIe up port on @sw or NULL
- */
-static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
+static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
+					 const struct tb_port *port)
 {
-	int i;
-	for (i = 1; i <= sw->config.max_port_number; i++)
-		if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
-			return &sw->ports[i];
-	return NULL;
-}
+	struct tb_port *down = NULL;

-/**
- * find_unused_down_port() - return the first inactive PCIe down port on @sw
- */
-static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
-{
-	int i;
-	int cap;
-	int res;
-	int data;
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		if (tb_is_upstream_port(&sw->ports[i]))
-			continue;
-		if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
-			continue;
-		cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
-		if (cap < 0)
-			continue;
-		res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
-		if (res < 0)
-			continue;
-		if (data & 0x80000000)
-			continue;
-		return &sw->ports[i];
+	/*
+	 * To keep plugging devices consistently in the same PCIe
+	 * hierarchy, do mapping here for switch downstream PCIe ports.
+	 */
+	if (tb_switch_is_usb4(sw)) {
+		down = usb4_switch_map_pcie_down(sw, port);
+	} else if (!tb_route(sw)) {
+		int phy_port = tb_phy_port_from_link(port->port);
+		int index;
+
+		/*
+		 * Hard-coded Thunderbolt port to PCIe down port mapping
+		 * per controller.
+		 */
+		if (tb_switch_is_cactus_ridge(sw) ||
+		    tb_switch_is_alpine_ridge(sw))
+			index = !phy_port ? 6 : 7;
+		else if (tb_switch_is_falcon_ridge(sw))
+			index = !phy_port ? 6 : 8;
+		else if (tb_switch_is_titan_ridge(sw))
+			index = !phy_port ? 8 : 9;
+		else
+			goto out;
+
+		/* Validate the hard-coding */
+		if (WARN_ON(index > sw->config.max_port_number))
+			goto out;
+
+		down = &sw->ports[index];
 	}
-	return NULL;
+
+	if (down) {
+		if (WARN_ON(!tb_port_is_pcie_down(down)))
+			goto out;
+		if (tb_pci_port_is_enabled(down))
+			goto out;
+
+		return down;
+	}
+
+out:
+	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
 }
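
For illustration, assuming tb_phy_port_from_link() folds the two lanes of a dual-link port onto one physical port (links 1 and 2 map to phy_port 0, links 3 and 4 to phy_port 1), the hard-coded table above would resolve on Titan Ridge as:

```c
/* Titan Ridge, assumed lane folding (sketch) */
phy_port = tb_phy_port_from_link(1);	/* 0 */
index = !phy_port ? 8 : 9;		/* -> PCIe adapter 8 */

phy_port = tb_phy_port_from_link(3);	/* 1 */
index = !phy_port ? 8 : 9;		/* -> PCIe adapter 9 */
```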

-/**
- * tb_activate_pcie_devices() - scan for and activate PCIe devices
- *
- * This method is somewhat ad hoc. For now it only supports one device
- * per port and only devices at depth 1.
- */
-static void tb_activate_pcie_devices(struct tb *tb)
+static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
 {
-	int i;
-	int cap;
-	u32 data;
-	struct tb_switch *sw;
-	struct tb_port *up_port;
-	struct tb_port *down_port;
-	struct tb_pci_tunnel *tunnel;
+	struct tb_port *host_port, *port;
 	struct tb_cm *tcm = tb_priv(tb);

-	/* scan for pcie devices at depth 1*/
-	for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
-		if (tb_is_upstream_port(&tb->root_switch->ports[i]))
+	host_port = tb_route(in->sw) ?
+		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
+
+	list_for_each_entry(port, &tcm->dp_resources, list) {
+		if (!tb_port_is_dpout(port))
 			continue;
-		if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
-			continue;
-		if (!tb->root_switch->ports[i].remote)
-			continue;
-		sw = tb->root_switch->ports[i].remote->sw;
-		up_port = tb_find_pci_up_port(sw);
-		if (!up_port) {
-			tb_sw_info(sw, "no PCIe devices found, aborting\n");
+
+		if (tb_port_is_enabled(port)) {
+			tb_port_dbg(port, "in use\n");
 			continue;
 		}

-		/* check whether port is already activated */
-		cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
-		if (cap < 0)
-			continue;
-		if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
-			continue;
-		if (data & 0x80000000) {
-			tb_port_info(up_port,
-				     "PCIe port already activated, aborting\n");
-			continue;
+		tb_port_dbg(port, "DP OUT available\n");
+
+		/*
+		 * Keep the DP tunnel under the topology starting from
+		 * the same host router downstream port.
+		 */
+		if (host_port && tb_route(port->sw)) {
+			struct tb_port *p;
+
+			p = tb_port_at(tb_route(port->sw), tb->root_switch);
+			if (p != host_port)
+				continue;
 		}

-		down_port = tb_find_unused_down_port(tb->root_switch);
-		if (!down_port) {
-			tb_port_info(up_port,
-				     "All PCIe down ports are occupied, aborting\n");
-			continue;
-		}
-		tunnel = tb_pci_alloc(tb, up_port, down_port);
-		if (!tunnel) {
-			tb_port_info(up_port,
-				     "PCIe tunnel allocation failed, aborting\n");
-			continue;
-		}
-
-		if (tb_pci_activate(tunnel)) {
-			tb_port_info(up_port,
-				     "PCIe tunnel activation failed, aborting\n");
-			tb_pci_free(tunnel);
-			continue;
-		}
-
-		list_add(&tunnel->list, &tcm->tunnel_list);
+		return port;
 	}
+
+	return NULL;
+}
+
+static void tb_tunnel_dp(struct tb *tb)
+{
+	int available_up, available_down, ret;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_port *port, *in, *out;
+	struct tb_tunnel *tunnel;
+
+	/*
+	 * Find pair of inactive DP IN and DP OUT adapters and then
+	 * establish a DP tunnel between them.
+	 */
+	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+	in = NULL;
+	out = NULL;
+	list_for_each_entry(port, &tcm->dp_resources, list) {
+		if (!tb_port_is_dpin(port))
+			continue;
+
+		if (tb_port_is_enabled(port)) {
+			tb_port_dbg(port, "in use\n");
+			continue;
+		}
+
+		tb_port_dbg(port, "DP IN available\n");
+
+		out = tb_find_dp_out(tb, port);
+		if (out) {
+			in = port;
+			break;
+		}
+	}
+
+	if (!in) {
+		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+		return;
+	}
+	if (!out) {
+		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
+		return;
+	}
+
+	/*
+	 * DP stream needs the domain to be active so runtime resume
+	 * both ends of the tunnel.
+	 *
+	 * This should bring the routers in the middle active as well
+	 * and keeps the domain from runtime suspending while the DP
+	 * tunnel is active.
+	 */
+	pm_runtime_get_sync(&in->sw->dev);
+	pm_runtime_get_sync(&out->sw->dev);
+
+	if (tb_switch_alloc_dp_resource(in->sw, in)) {
+		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
+		goto err_rpm_put;
+	}
+
+	/* Make all unused USB3 bandwidth available for the new DP tunnel */
+	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
+	if (ret) {
+		tb_warn(tb, "failed to release unused bandwidth\n");
+		goto err_dealloc_dp;
+	}
+
+	ret = tb_available_bandwidth(tb, in, out, &available_up,
+				     &available_down);
+	if (ret)
+		goto err_reclaim;
+
+	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
+	       available_up, available_down);
+
+	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
+	if (!tunnel) {
+		tb_port_dbg(out, "could not allocate DP tunnel\n");
+		goto err_reclaim;
+	}
+
+	if (tb_tunnel_activate(tunnel)) {
+		tb_port_info(out, "DP tunnel activation failed, aborting\n");
+		goto err_free;
+	}
+
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	tb_reclaim_usb3_bandwidth(tb, in, out);
+	return;
+
+err_free:
+	tb_tunnel_free(tunnel);
+err_reclaim:
+	tb_reclaim_usb3_bandwidth(tb, in, out);
+err_dealloc_dp:
+	tb_switch_dealloc_dp_resource(in->sw, in);
+err_rpm_put:
+	pm_runtime_mark_last_busy(&out->sw->dev);
+	pm_runtime_put_autosuspend(&out->sw->dev);
+	pm_runtime_mark_last_busy(&in->sw->dev);
+	pm_runtime_put_autosuspend(&in->sw->dev);
+}
+
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
+{
+	struct tb_port *in, *out;
+	struct tb_tunnel *tunnel;
+
+	if (tb_port_is_dpin(port)) {
+		tb_port_dbg(port, "DP IN resource unavailable\n");
+		in = port;
+		out = NULL;
+	} else {
+		tb_port_dbg(port, "DP OUT resource unavailable\n");
+		in = NULL;
+		out = port;
+	}
+
+	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
+	tb_deactivate_and_free_tunnel(tunnel);
+	list_del_init(&port->list);
+
+	/*
+	 * See if there is another DP OUT port that can be used to
+	 * create another tunnel.
+	 */
+	tb_tunnel_dp(tb);
+}
+
+static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_port *p;
+
+	if (tb_port_is_enabled(port))
+		return;
+
+	list_for_each_entry(p, &tcm->dp_resources, list) {
+		if (p == port)
+			return;
+	}
+
+	tb_port_dbg(port, "DP %s resource available\n",
+		    tb_port_is_dpin(port) ? "IN" : "OUT");
+	list_add_tail(&port->list, &tcm->dp_resources);
+
+	/* Look for suitable DP IN <-> DP OUT pairs now */
+	tb_tunnel_dp(tb);
+}
+
+static void tb_disconnect_and_release_dp(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel, *n;
+
+	/*
+	 * Tear down all DP tunnels and release their resources. They
+	 * will be re-established after resume based on plug events.
+	 */
+	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
+		if (tb_tunnel_is_dp(tunnel))
+			tb_deactivate_and_free_tunnel(tunnel);
+	}
+
+	while (!list_empty(&tcm->dp_resources)) {
+		struct tb_port *port;
+
+		port = list_first_entry(&tcm->dp_resources,
+					struct tb_port, list);
+		list_del_init(&port->list);
+	}
+}
+
+static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
+{
+	struct tb_port *up, *down, *port;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_switch *parent_sw;
+	struct tb_tunnel *tunnel;
+
+	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
+	if (!up)
+		return 0;
+
+	/*
+	 * Look up available down port. Since we are chaining it should
+	 * be found right above this switch.
+	 */
+	parent_sw = tb_to_switch(sw->dev.parent);
+	port = tb_port_at(tb_route(sw), parent_sw);
+	down = tb_find_pcie_down(parent_sw, port);
+	if (!down)
+		return 0;
+
+	tunnel = tb_tunnel_alloc_pci(tb, up, down);
+	if (!tunnel)
+		return -ENOMEM;
+
+	if (tb_tunnel_activate(tunnel)) {
+		tb_port_info(up,
+			     "PCIe tunnel activation failed, aborting\n");
+		tb_tunnel_free(tunnel);
+		return -EIO;
+	}
+
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	return 0;
+}
+
+static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_port *nhi_port, *dst_port;
+	struct tb_tunnel *tunnel;
+	struct tb_switch *sw;
+
+	sw = tb_to_switch(xd->dev.parent);
+	dst_port = tb_port_at(xd->route, sw);
+	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
+
+	mutex_lock(&tb->lock);
+	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
+				     xd->transmit_path, xd->receive_ring,
+				     xd->receive_path);
+	if (!tunnel) {
+		mutex_unlock(&tb->lock);
+		return -ENOMEM;
+	}
+
+	if (tb_tunnel_activate(tunnel)) {
+		tb_port_info(nhi_port,
+			     "DMA tunnel activation failed, aborting\n");
+		tb_tunnel_free(tunnel);
+		mutex_unlock(&tb->lock);
+		return -EIO;
+	}
+
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	mutex_unlock(&tb->lock);
+	return 0;
+}
+
+static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	struct tb_port *dst_port;
+	struct tb_tunnel *tunnel;
+	struct tb_switch *sw;
+
+	sw = tb_to_switch(xd->dev.parent);
+	dst_port = tb_port_at(xd->route, sw);
+
+	/*
+	 * It is possible that the tunnel was already torn down (in
+	 * case of cable disconnect) so it is fine if we cannot find it
+	 * here anymore.
+	 */
+	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+	tb_deactivate_and_free_tunnel(tunnel);
+}
+
+static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	if (!xd->is_unplugged) {
+		mutex_lock(&tb->lock);
+		__tb_disconnect_xdomain_paths(tb, xd);
+		mutex_unlock(&tb->lock);
+	}
+	return 0;
 }

 /* hotplug handling */
-
-struct tb_hotplug_event {
-	struct work_struct work;
-	struct tb *tb;
-	u64 route;
-	u8 port;
-	bool unplug;
-};

 /**
  * tb_handle_hotplug() - handle hotplug event
[...]
 	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_switch *sw;
 	struct tb_port *port;
+
+	/* Bring the domain back from sleep if it was suspended */
+	pm_runtime_get_sync(&tb->dev);
+
 	mutex_lock(&tb->lock);
 	if (!tcm->hotplug_active)
 		goto out; /* during init, suspend or shutdown */
[...]
 	}
 	port = &sw->ports[ev->port];
 	if (tb_is_upstream_port(port)) {
-		tb_warn(tb,
-			"hotplug event for upstream port %llx:%x (unplug: %d)\n",
-			ev->route, ev->port, ev->unplug);
+		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
+		       ev->route, ev->port, ev->unplug);
 		goto put_sw;
 	}
+
+	pm_runtime_get_sync(&sw->dev);
+
 	if (ev->unplug) {
-		if (port->remote) {
-			tb_port_info(port, "unplugged\n");
+		tb_retimer_remove_all(port);
+
+		if (tb_port_has_remote(port)) {
+			tb_port_dbg(port, "switch unplugged\n");
 			tb_sw_set_unplugged(port->remote->sw);
 			tb_free_invalid_tunnels(tb);
+			tb_remove_dp_resources(port->remote->sw);
+			tb_switch_tmu_disable(port->remote->sw);
+			tb_switch_unconfigure_link(port->remote->sw);
+			tb_switch_lane_bonding_disable(port->remote->sw);
 			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
+			if (port->dual_link_port)
+				port->dual_link_port->remote = NULL;
+			/* Maybe we can create another DP tunnel */
+			tb_tunnel_dp(tb);
+		} else if (port->xdomain) {
+			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
+
+			tb_port_dbg(port, "xdomain unplugged\n");
+			/*
+			 * Service drivers are unbound during
+			 * tb_xdomain_remove() so setting XDomain as
+			 * unplugged here prevents deadlock if they call
+			 * tb_xdomain_disable_paths(). We will tear down
+			 * the path below.
+			 */
+			xd->is_unplugged = true;
+			tb_xdomain_remove(xd);
+			port->xdomain = NULL;
+			__tb_disconnect_xdomain_paths(tb, xd);
+			tb_xdomain_put(xd);
+			tb_port_unconfigure_xdomain(port);
+		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+			tb_dp_resource_unavailable(tb, port);
 		} else {
-			tb_port_info(port,
-				     "got unplug event for disconnected port, ignoring\n");
+			tb_port_dbg(port,
+				    "got unplug event for disconnected port, ignoring\n");
 		}
 	} else if (port->remote) {
-		tb_port_info(port,
-			     "got plug event for connected port, ignoring\n");
+		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
 	} else {
-		tb_port_info(port, "hotplug: scanning\n");
-		tb_scan_port(port);
-		if (!port->remote) {
-			tb_port_info(port, "hotplug: no switch found\n");
-		} else if (port->remote->sw->config.depth > 1) {
-			tb_sw_warn(port->remote->sw,
-				   "hotplug: chaining not supported\n");
-		} else {
-			tb_sw_info(port->remote->sw,
-				   "hotplug: activating pcie devices\n");
-			tb_activate_pcie_devices(tb);
+		if (tb_port_is_null(port)) {
+			tb_port_dbg(port, "hotplug: scanning\n");
+			tb_scan_port(port);
+			if (!port->remote)
+				tb_port_dbg(port, "hotplug: no switch found\n");
+		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+			tb_dp_resource_available(tb, port);
 		}
 	}
+
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);

 put_sw:
 	tb_switch_put(sw);
 out:
 	mutex_unlock(&tb->lock);
+
+	pm_runtime_mark_last_busy(&tb->dev);
+	pm_runtime_put_autosuspend(&tb->dev);
+
 	kfree(ev);
 }

[...]
 			    const void *buf, size_t size)
 {
 	const struct cfg_event_pkg *pkg = buf;
-	struct tb_hotplug_event *ev;
 	u64 route;

 	if (type != TB_CFG_PKG_EVENT) {
[...]

 	route = tb_cfg_get_route(&pkg->header);

-	if (tb_cfg_error(tb->ctl, route, pkg->port,
-			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
+	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
 			pkg->port);
 	}

-	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
-	if (!ev)
-		return;
-	INIT_WORK(&ev->work, tb_handle_hotplug);
-	ev->tb = tb;
-	ev->route = route;
-	ev->port = pkg->port;
-	ev->unplug = pkg->unplug;
-	queue_work(tb->wq, &ev->work);
+	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
 }

 static void tb_stop(struct tb *tb)
 {
 	struct tb_cm *tcm = tb_priv(tb);
-	struct tb_pci_tunnel *tunnel;
-	struct tb_pci_tunnel *n;
+	struct tb_tunnel *tunnel;
+	struct tb_tunnel *n;

+	cancel_delayed_work(&tcm->remove_work);
 	/* tunnels are only present after everything has been initialized */
 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
-		tb_pci_deactivate(tunnel);
-		tb_pci_free(tunnel);
+		/*
+		 * DMA tunnels require the driver to be functional so we
+		 * tear them down. Other protocol tunnels can be left
+		 * intact.
+		 */
+		if (tb_tunnel_is_dma(tunnel))
+			tb_tunnel_deactivate(tunnel);
+		tb_tunnel_free(tunnel);
 	}
 	tb_switch_remove(tb->root_switch);
 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
+}
+
+static int tb_scan_finalize_switch(struct device *dev, void *data)
+{
+	if (tb_is_switch(dev)) {
+		struct tb_switch *sw = tb_to_switch(dev);
+
+		/*
+		 * If we found that the switch was already setup by the
+		 * boot firmware, mark it as authorized now before we
+		 * send uevent to userspace.
+		 */
+		if (sw->boot)
+			sw->authorized = 1;
+
+		dev_set_uevent_suppress(dev, false);
+		kobject_uevent(&dev->kobj, KOBJ_ADD);
+		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
+	}
+
+	return 0;
 }
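
tb_scan_finalize_switch() is the other half of the uevent suppression started in tb_scan_port() above; the two calls are meant to pair up like this (sketch):

```c
/* During discovery, before tb_switch_add() (see tb_scan_port()): */
if (!tcm->hotplug_active)
	dev_set_uevent_suppress(&sw->dev, true);

/* Once discovery is done (here), per discovered switch: */
dev_set_uevent_suppress(dev, false);
kobject_uevent(&dev->kobj, KOBJ_ADD);
```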

 static int tb_start(struct tb *tb)
[...]
 	int ret;

 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
-	if (!tb->root_switch)
-		return -ENOMEM;
+	if (IS_ERR(tb->root_switch))
+		return PTR_ERR(tb->root_switch);

 	/*
 	 * ICM firmware upgrade needs running firmware and in native
[...]
 	 * root switch.
 	 */
 	tb->root_switch->no_nvm_upgrade = true;
+	/* All USB4 routers support runtime PM */
+	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

 	ret = tb_switch_configure(tb->root_switch);
 	if (ret) {
[...]
 		return ret;
 	}

+	/* Enable TMU if it is off */
+	tb_switch_tmu_enable(tb->root_switch);
 	/* Full scan to discover devices added before the driver was loaded. */
 	tb_scan_switch(tb->root_switch);
-	tb_activate_pcie_devices(tb);
+	/* Find out tunnels created by the boot firmware */
+	tb_discover_tunnels(tb->root_switch);
| 1323 | + /* |
---|
| 1324 | + * If the boot firmware did not create USB 3.x tunnels, create them |
---|
| 1325 | + * now for the whole topology. |
---|
| 1326 | + */ |
---|
| 1327 | + tb_create_usb3_tunnels(tb->root_switch); |
---|
| 1328 | + /* Add DP IN resources for the root switch */ |
---|
| 1329 | + tb_add_dp_resources(tb->root_switch); |
---|
| 1330 | + /* Make the discovered switches available to userspace */ |
---|
| 1331 | + device_for_each_child(&tb->root_switch->dev, NULL, |
---|
| 1332 | + tb_scan_finalize_switch); |
---|
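tb_create_usb3_tunnels() presumably walks the discovered topology depth-first, tunneling USB3 for every link the boot firmware left untunneled. A sketch under that assumption, taking tb_tunnel_usb3() to be the per-router worker:

```c
/* Sketch: tunnel USB3 for this router, then recurse into children */
static int tb_create_usb3_tunnels_sketch(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* The host router (route 0) has no upstream link to tunnel */
	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels_sketch(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}
```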
400 | 1333 | |
---|
401 | 1334 | /* Allow tb_handle_hotplug to progress events */ |
---|
402 | 1335 | tcm->hotplug_active = true; |
---|
.. | .. |
---|
407 | 1340 | { |
---|
408 | 1341 | struct tb_cm *tcm = tb_priv(tb); |
---|
409 | 1342 | |
---|
410 | | - tb_info(tb, "suspending...\n"); |
---|
411 | | - tb_switch_suspend(tb->root_switch); |
---|
| 1343 | + tb_dbg(tb, "suspending...\n"); |
---|
| 1344 | + tb_disconnect_and_release_dp(tb); |
---|
| 1345 | + tb_switch_suspend(tb->root_switch, false); |
---|
412 | 1346 | tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ |
---|
413 | | - tb_info(tb, "suspend finished\n"); |
---|
| 1347 | + tb_dbg(tb, "suspend finished\n"); |
---|
414 | 1348 | |
---|
415 | 1349 | return 0; |
---|
| 1350 | +} |
---|
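Before system sleep the DP tunnels are torn down and the DP IN bookkeeping dropped, so resume re-evaluates DP from scratch. A sketch of what tb_disconnect_and_release_dp() plausibly does with the two tb_cm lists (an assumption, not the verbatim code):

```c
static void tb_disconnect_and_release_dp_sketch(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	struct tb_port *port;

	/* Tear down DP tunnels, newest first */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;
		tb_tunnel_deactivate(tunnel);
		list_del(&tunnel->list);
		tb_tunnel_free(tunnel);
	}

	/* Forget the tracked DP IN resources */
	while (!list_empty(&tcm->dp_resources)) {
		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}
```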
| 1351 | + |
---|
| 1352 | +static void tb_restore_children(struct tb_switch *sw) |
---|
| 1353 | +{ |
---|
| 1354 | + struct tb_port *port; |
---|
| 1355 | + |
---|
| 1356 | + /* No need to restore if the router is already unplugged */ |
---|
| 1357 | + if (sw->is_unplugged) |
---|
| 1358 | + return; |
---|
| 1359 | + |
---|
| 1360 | + if (tb_enable_tmu(sw)) |
---|
| 1361 | + tb_sw_warn(sw, "failed to restore TMU configuration\n"); |
---|
| 1362 | + |
---|
| 1363 | + tb_switch_for_each_port(sw, port) { |
---|
| 1364 | + if (!tb_port_has_remote(port) && !port->xdomain) |
---|
| 1365 | + continue; |
---|
| 1366 | + |
---|
| 1367 | + if (port->remote) { |
---|
| 1368 | + tb_switch_lane_bonding_enable(port->remote->sw); |
---|
| 1369 | + tb_switch_configure_link(port->remote->sw); |
---|
| 1370 | + |
---|
| 1371 | + tb_restore_children(port->remote->sw); |
---|
| 1372 | + } else if (port->xdomain) { |
---|
| 1373 | + tb_port_configure_xdomain(port); |
---|
| 1374 | + } |
---|
| 1375 | + } |
---|
416 | 1376 | } |
---|
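tb_restore_children() only descends through ports that still lead somewhere. tb_port_has_remote() is presumably the usual "primary lane of a downstream link" test, along these lines (sketch; the dual-link handling is an assumption):

```c
/* Assumed semantics: true only for the primary lane of a downstream link */
static inline bool tb_port_has_remote(const struct tb_port *port)
{
	if (tb_is_upstream_port(port))
		return false;
	if (!port->remote)
		return false;
	/* Skip the secondary lane of a bonded link pair */
	if (port->dual_link_port && port->link_nr)
		return false;
	return true;
}
```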
417 | 1377 | |
---|
418 | 1378 | static int tb_resume_noirq(struct tb *tb) |
---|
419 | 1379 | { |
---|
420 | 1380 | struct tb_cm *tcm = tb_priv(tb); |
---|
421 | | - struct tb_pci_tunnel *tunnel, *n; |
---|
| 1381 | + struct tb_tunnel *tunnel, *n; |
---|
422 | 1382 | |
---|
423 | | - tb_info(tb, "resuming...\n"); |
---|
| 1383 | + tb_dbg(tb, "resuming...\n"); |
---|
424 | 1384 | |
---|
425 | 1385 | /* Remove any PCI devices the firmware might have set up */ |
---|
426 | | - tb_switch_reset(tb, 0); |
---|
| 1386 | + tb_switch_reset(tb->root_switch); |
---|
427 | 1387 | |
---|
428 | 1388 | tb_switch_resume(tb->root_switch); |
---|
429 | 1389 | tb_free_invalid_tunnels(tb); |
---|
430 | 1390 | tb_free_unplugged_children(tb->root_switch); |
---|
| 1391 | + tb_restore_children(tb->root_switch); |
---|
431 | 1392 | list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) |
---|
432 | | - tb_pci_restart(tunnel); |
---|
| 1393 | + tb_tunnel_restart(tunnel); |
---|
433 | 1394 | if (!list_empty(&tcm->tunnel_list)) { |
---|
434 | 1395 | /* |
---|
435 | 1396 | * The PCIe links need some time to come back up after being |
---|
436 | 1397 | * restarted; 100ms has proven sufficient in practice. |
---|
437 | 1398 | */ |
---|
438 | | - tb_info(tb, "tunnels restarted, sleeping for 100ms\n"); |
---|
| 1399 | + tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n"); |
---|
439 | 1400 | msleep(100); |
---|
440 | 1401 | } |
---|
441 | 1402 | /* Allow tb_handle_hotplug to progress events */ |
---|
442 | 1403 | tcm->hotplug_active = true; |
---|
443 | | - tb_info(tb, "resume finished\n"); |
---|
| 1404 | + tb_dbg(tb, "resume finished\n"); |
---|
444 | 1405 | |
---|
| 1406 | + return 0; |
---|
| 1407 | +} |
---|
| 1408 | + |
---|
| 1409 | +static int tb_free_unplugged_xdomains(struct tb_switch *sw) |
---|
| 1410 | +{ |
---|
| 1411 | + struct tb_port *port; |
---|
| 1412 | + int ret = 0; |
---|
| 1413 | + |
---|
| 1414 | + tb_switch_for_each_port(sw, port) { |
---|
| 1415 | + if (tb_is_upstream_port(port)) |
---|
| 1416 | + continue; |
---|
| 1417 | + if (port->xdomain && port->xdomain->is_unplugged) { |
---|
| 1418 | + tb_retimer_remove_all(port); |
---|
| 1419 | + tb_xdomain_remove(port->xdomain); |
---|
| 1420 | + tb_port_unconfigure_xdomain(port); |
---|
| 1421 | + port->xdomain = NULL; |
---|
| 1422 | + ret++; |
---|
| 1423 | + } else if (port->remote) { |
---|
| 1424 | + ret += tb_free_unplugged_xdomains(port->remote->sw); |
---|
| 1425 | + } |
---|
| 1426 | + } |
---|
| 1427 | + |
---|
| 1428 | + return ret; |
---|
| 1429 | +} |
---|
| 1430 | + |
---|
| 1431 | +static int tb_freeze_noirq(struct tb *tb) |
---|
| 1432 | +{ |
---|
| 1433 | + struct tb_cm *tcm = tb_priv(tb); |
---|
| 1434 | + |
---|
| 1435 | + tcm->hotplug_active = false; |
---|
| 1436 | + return 0; |
---|
| 1437 | +} |
---|
| 1438 | + |
---|
| 1439 | +static int tb_thaw_noirq(struct tb *tb) |
---|
| 1440 | +{ |
---|
| 1441 | + struct tb_cm *tcm = tb_priv(tb); |
---|
| 1442 | + |
---|
| 1443 | + tcm->hotplug_active = true; |
---|
| 1444 | + return 0; |
---|
| 1445 | +} |
---|
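freeze_noirq/thaw_noirq deliberately leave the hardware alone: hibernation must not disturb tunnels that the firmware or boot kernel may still be using, so only event processing is gated. The domain core presumably just forwards these callbacks when a connection manager provides them; a hypothetical forwarding shim (names assumed, not the actual domain.c code):

```c
/* Hypothetical domain-level dispatch for the noirq freeze hook */
static int tb_domain_freeze_noirq_sketch(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	int ret = 0;

	mutex_lock(&tb->lock);
	if (tb->cm_ops->freeze_noirq)
		ret = tb->cm_ops->freeze_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}
```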
| 1446 | + |
---|
| 1447 | +static void tb_complete(struct tb *tb) |
---|
| 1448 | +{ |
---|
| 1449 | + /* |
---|
| 1450 | + * Release any unplugged XDomains. If another domain has been |
---|
| 1451 | + * plugged in place of an unplugged XDomain, run another rescan |
---|
| 1452 | + * to pick it up. |
---|
| 1453 | + */ |
---|
| 1454 | + mutex_lock(&tb->lock); |
---|
| 1455 | + if (tb_free_unplugged_xdomains(tb->root_switch)) |
---|
| 1456 | + tb_scan_switch(tb->root_switch); |
---|
| 1457 | + mutex_unlock(&tb->lock); |
---|
| 1458 | +} |
---|
| 1459 | + |
---|
| 1460 | +static int tb_runtime_suspend(struct tb *tb) |
---|
| 1461 | +{ |
---|
| 1462 | + struct tb_cm *tcm = tb_priv(tb); |
---|
| 1463 | + |
---|
| 1464 | + mutex_lock(&tb->lock); |
---|
| 1465 | + tb_switch_suspend(tb->root_switch, true); |
---|
| 1466 | + tcm->hotplug_active = false; |
---|
| 1467 | + mutex_unlock(&tb->lock); |
---|
| 1468 | + |
---|
| 1469 | + return 0; |
---|
| 1470 | +} |
---|
| 1471 | + |
---|
| 1472 | +static void tb_remove_work(struct work_struct *work) |
---|
| 1473 | +{ |
---|
| 1474 | + struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work); |
---|
| 1475 | + struct tb *tb = tcm_to_tb(tcm); |
---|
| 1476 | + |
---|
| 1477 | + mutex_lock(&tb->lock); |
---|
| 1478 | + if (tb->root_switch) { |
---|
| 1479 | + tb_free_unplugged_children(tb->root_switch); |
---|
| 1480 | + tb_free_unplugged_xdomains(tb->root_switch); |
---|
| 1481 | + } |
---|
| 1482 | + mutex_unlock(&tb->lock); |
---|
| 1483 | +} |
---|
| 1484 | + |
---|
| 1485 | +static int tb_runtime_resume(struct tb *tb) |
---|
| 1486 | +{ |
---|
| 1487 | + struct tb_cm *tcm = tb_priv(tb); |
---|
| 1488 | + struct tb_tunnel *tunnel, *n; |
---|
| 1489 | + |
---|
| 1490 | + mutex_lock(&tb->lock); |
---|
| 1491 | + tb_switch_resume(tb->root_switch); |
---|
| 1492 | + tb_free_invalid_tunnels(tb); |
---|
| 1493 | + tb_restore_children(tb->root_switch); |
---|
| 1494 | + list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) |
---|
| 1495 | + tb_tunnel_restart(tunnel); |
---|
| 1496 | + tcm->hotplug_active = true; |
---|
| 1497 | + mutex_unlock(&tb->lock); |
---|
| 1498 | + |
---|
| 1499 | + /* |
---|
| 1500 | + * Schedule cleanup of any unplugged devices. Run this in a |
---|
| 1501 | + * separate worker to avoid a possible deadlock if the device |
---|
| 1502 | + * removal runtime resumes the unplugged device. |
---|
| 1503 | + */ |
---|
| 1504 | + queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50)); |
---|
445 | 1505 | return 0; |
---|
446 | 1506 | } |
---|
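The 50 ms removal work is plain delayed_work usage: initialized once in tb_probe(), armed here, and cancelled in tb_stop(). A minimal self-contained illustration of that API triple:

```c
/* Minimal delayed_work round trip mirroring the pattern above */
struct example_cm {
	struct delayed_work remove_work;
};

static void example_remove_work(struct work_struct *work)
{
	struct example_cm *cm =
		container_of(work, struct example_cm, remove_work.work);

	/* ... remove unplugged devices under the domain lock ... */
}

/*
 * Setup:  INIT_DELAYED_WORK(&cm->remove_work, example_remove_work);
 * Arm:    queue_delayed_work(wq, &cm->remove_work, msecs_to_jiffies(50));
 * Cancel: cancel_delayed_work(&cm->remove_work);
 */
```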
447 | 1507 | |
---|
.. | .. |
---|
450 | 1510 | .stop = tb_stop, |
---|
451 | 1511 | .suspend_noirq = tb_suspend_noirq, |
---|
452 | 1512 | .resume_noirq = tb_resume_noirq, |
---|
| 1513 | + .freeze_noirq = tb_freeze_noirq, |
---|
| 1514 | + .thaw_noirq = tb_thaw_noirq, |
---|
| 1515 | + .complete = tb_complete, |
---|
| 1516 | + .runtime_suspend = tb_runtime_suspend, |
---|
| 1517 | + .runtime_resume = tb_runtime_resume, |
---|
453 | 1518 | .handle_event = tb_handle_event, |
---|
| 1519 | + .approve_switch = tb_tunnel_pci, |
---|
| 1520 | + .approve_xdomain_paths = tb_approve_xdomain_paths, |
---|
| 1521 | + .disconnect_xdomain_paths = tb_disconnect_xdomain_paths, |
---|
454 | 1522 | }; |
---|
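With the new ops, ->approve_switch (here tb_tunnel_pci) runs when userspace authorizes a device. The domain layer is expected to gate the call so a child cannot be approved before its parent; a sketch modeled on tb_domain_approve_switch(), with the shape assumed:

```c
/* Sketch of the domain-level gatekeeping around ->approve_switch */
int tb_domain_approve_switch_sketch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch || !sw)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}
```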
455 | 1523 | |
---|
456 | 1524 | struct tb *tb_probe(struct tb_nhi *nhi) |
---|
.. | .. |
---|
458 | 1526 | struct tb_cm *tcm; |
---|
459 | 1527 | struct tb *tb; |
---|
460 | 1528 | |
---|
461 | | - if (!x86_apple_machine) |
---|
462 | | - return NULL; |
---|
463 | | - |
---|
464 | 1529 | tb = tb_domain_alloc(nhi, sizeof(*tcm)); |
---|
465 | 1530 | if (!tb) |
---|
466 | 1531 | return NULL; |
---|
467 | 1532 | |
---|
468 | | - tb->security_level = TB_SECURITY_NONE; |
---|
| 1533 | + tb->security_level = TB_SECURITY_USER; |
---|
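Since the software connection manager is no longer Apple-only, devices must not be auto-authorized anymore: TB_SECURITY_USER defers PCIe tunneling until userspace approves each device. The levels, roughly as declared in tb.h (comments paraphrased; the exact set varies by kernel version):

```c
enum tb_security_level {
	TB_SECURITY_NONE,	/* no approval needed, tunnel right away */
	TB_SECURITY_USER,	/* userspace approves each device (used here) */
	TB_SECURITY_SECURE,	/* approval backed by a device challenge */
	TB_SECURITY_DPONLY,	/* only DisplayPort is tunneled */
};
```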
469 | 1534 | tb->cm_ops = &tb_cm_ops; |
---|
470 | 1535 | |
---|
471 | 1536 | tcm = tb_priv(tb); |
---|
472 | 1537 | INIT_LIST_HEAD(&tcm->tunnel_list); |
---|
| 1538 | + INIT_LIST_HEAD(&tcm->dp_resources); |
---|
| 1539 | + INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work); |
---|
473 | 1540 | |
---|
474 | 1541 | return tb; |
---|
475 | 1542 | } |
---|
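With the x86_apple_machine check gone, tb_probe() can serve any host. The NHI driver presumably prefers the firmware connection manager and falls back to this software one; a sketched call site (the helper name is hypothetical, icm_probe()/tb_probe() are the real entry points):

```c
/* Hypothetical selection logic at NHI probe time (sketch) */
static struct tb *select_domain(struct tb_nhi *nhi)
{
	struct tb *tb;

	/* Prefer the firmware (ICM) connection manager if it owns the host */
	tb = icm_probe(nhi);
	if (!tb)
		tb = tb_probe(nhi);

	return tb;
}
```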