// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100 /* ms */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};

static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

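/*
 * Hotplug events are queued from the control channel callback and
 * processed from the domain workqueue, so the fields below carry
 * everything tb_handle_hotplug() needs to locate the affected port.
 */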
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

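/*
 * Allocate a hotplug event and queue it to the domain workqueue. The
 * event is freed by tb_handle_hotplug() once processed; on allocation
 * failure the event is silently dropped.
 */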
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

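/*
 * Walk all ports of @sw and record each DP IN adapter that reports a
 * DP resource as available, so it can later be paired with a DP OUT
 * adapter in tb_tunnel_dp().
 */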
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "discovered available DP %s resource\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}

static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			/*
			 * If a DP tunnel exists, change the TMU mode of
			 * the host router's first depth children to HiFi
			 * so that CL0s works.
			 */
			if (tunnel)
				tb_switch_enable_tmu_1st_child(tb->root_switch,
						TB_SWITCH_TMU_RATE_HIFI);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}
	}
}

static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}

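/*
 * (Re)enable the TMU of @sw in the requested mode: if the unit is not
 * already running in that mode it is first disabled, the local time is
 * posted to the router, and only then is it enabled again.
 */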
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

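/*
 * Find the first tunnel of @type whose endpoints match. Either
 * @src_port or @dst_port can be NULL in which case that end is not
 * compared, e.g. tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL)
 * matches any USB3 tunnel originating from @usb3_down.
 */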
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

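/*
 * Compute the minimum bandwidth available along the path between
 * @src_port and @dst_port, per direction. Starting from the 40000 Mb/s
 * ceiling, each lane adapter on the path contributes
 * link_speed * link_width * 1000 Mb/s minus a 10% guard band; for
 * example a bonded 20 Gb/s link yields 40000 - 4000 = 36000 Mb/s,
 * from which the bandwidth consumed by DP and USB3 tunnels is then
 * subtracted.
 */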
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
					      &available_down);
}

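/*
 * Set up a USB3 tunnel between the USB3 upstream adapter of @sw and
 * the matching downstream adapter of its parent. For chained devices
 * the unused USB3 bandwidth on the branch is released first so the new
 * tunnel can be sized against everything actually available, and
 * reclaimed again afterwards on both the success and error paths.
 */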
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;
	int ret;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment runtime PM is only supported for Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);
	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	ret = tb_switch_enable_clx(sw, TB_CL1);
	if (ret && ret != -EOPNOTSUPP)
		tb_sw_warn(sw, "failed to enable %s on upstream port\n",
			   tb_switch_clx_name(TB_CL1));

	if (tb_switch_is_clx_enabled(sw, TB_CL1))
		/*
		 * To support highest CLx state, we set router's TMU to
		 * Normal-Uni mode.
		 */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
	else
		/* If CLx disabled, configure router's TMU to HiFi-Bidir mode */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

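/*
 * Pick an inactive DP OUT adapter for @in. Candidates reachable
 * through a different host router downstream port than @in are
 * skipped so that the DP tunnel stays within a single branch of the
 * topology.
 */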
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret, link_nr;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * This is only applicable to links that are not bonded (so
	 * when Thunderbolt 1 hardware is involved somewhere in the
	 * topology). For these try to share the DP bandwidth between
	 * the two lanes.
	 */
	link_nr = 1;
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel)) {
			link_nr = 0;
			break;
		}
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
				    available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	/*
	 * If a DP tunnel exists, change the TMU mode of the host
	 * router's first depth children to HiFi so that CL0s works.
	 */
	tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);

	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}

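/*
 * Used as the ->disapprove_switch callback in tb_cm_ops below:
 * disconnects the xHCI of @sw and tears down its PCIe tunnel so the
 * router no longer carries PCIe traffic.
 */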
static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_switch_xhci_disconnect(sw);

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}

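/*
 * Used as the ->approve_switch callback in tb_cm_ops below: builds a
 * PCIe tunnel from the PCIe upstream adapter of @sw to the matching
 * downstream adapter of its parent.
 */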
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/*
	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
	 * here.
	 */
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");

	if (tb_switch_xhci_connect(sw))
		tb_sw_warn(sw, "failed to connect xHCI\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

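/*
 * Establish a DMA tunnel between the NHI adapter of the host router
 * and the port leading to the remote domain @xd, using the given
 * transmit/receive paths and rings. Serialized against other topology
 * changes with tb->lock.
 */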
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
				     transmit_ring, receive_path, receive_ring);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					  int transmit_path, int transmit_ring,
					  int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel, *n;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
			continue;

		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					receive_path, receive_ring))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
					      transmit_ring, receive_path,
					      receive_ring);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * all the tunnels below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else if (!port->port) {
			tb_sw_dbg(sw, "xHCI disconnect request\n");
			tb_switch_xhci_disconnect(sw);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else if (!port->port && sw->authorized) {
		tb_sw_dbg(sw, "xHCI connect request\n");
		tb_switch_xhci_connect(sw);
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already set up by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware, and in native
	 * mode that is not available, so disable firmware upgrade of
	 * the root switch.
	 *
	 * However, USB4 routers support NVM firmware upgrade if they
	 * implement the necessary router operations.
	 */
	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/*
	 * To support highest CLx state, we set host router's TMU to
	 * Normal mode.
	 */
	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
				false);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb);
	/* Add DP resources from the DP tunnels created by the boot firmware */
	tb_discover_dp_resources(tb);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

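/*
 * Walk the topology depth-first after resume and re-establish the
 * per-router configuration (CLx, TMU mode, lane bonding, link and
 * XDomain setup) that was in effect before the sleep state.
 */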
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx re-enabling in case CLx is not supported.
	 */
	ret = tb_switch_enable_clx(sw, TB_CL1);
	if (ret && ret != -EOPNOTSUPP)
		tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
			   tb_switch_clx_name(TB_CL1));

	if (tb_switch_is_clx_enabled(sw, TB_CL1))
		/*
		 * To support highest CLx state, we set router's TMU to
		 * Normal-Uni mode.
		 */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
	else
		/* If CLx disabled, configure router's TMU to HiFi-Bidir mode */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port, port->xdomain);
		}
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains; if another domain was swapped
	 * in place of an unplugged XDomain we need to run another
	 * rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

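/*
 * Deferred cleanup scheduled from tb_runtime_resume(): removing an
 * unplugged device may itself runtime resume it, so this runs from
 * the domain workqueue instead of the resume path to avoid deadlock.
 */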
static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}

static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static void tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;

	if (!x86_apple_machine)
		return;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return;

	/*
	 * For each hotplug downstream port, add a device link back to
	 * the NHI so that PCIe tunnels can be re-established after
	 * sleep.
	 */
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}
}

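/*
 * tb_probe() - Set up the software connection manager for @nhi
 *
 * Allocates a Thunderbolt domain with struct tb_cm as its private
 * data, wires up tb_cm_ops, and picks the security level based on
 * whether ACPI allows PCIe tunneling. Returns NULL if the domain
 * cannot be allocated.
 */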
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");

	tb_apple_add_links(nhi);
	tb_acpi_add_links(nhi);

	return tb;
}