1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Thunderbolt driver - bus logic (NHI independent)
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
7 */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT 100 /* ms */
20 #define MAX_GROUPS 7 /* max Group_ID is 7 */
21
22 /**
23 * struct tb_cm - Simple Thunderbolt connection manager
24 * @tunnel_list: List of active tunnels
25 * @dp_resources: List of available DP resources for DP tunneling
26 * @hotplug_active: tb_handle_hotplug will stop progressing plug
27 * events and exit if this is not set (it needs to
28 * acquire the lock one more time). Used to drain wq
29 * after cfg has been paused.
30 * @remove_work: Work used to remove any unplugged routers after
31 * runtime resume
32 * @groups: Bandwidth groups used in this domain.
33 */
34 struct tb_cm {
35 struct list_head tunnel_list;
36 struct list_head dp_resources;
37 bool hotplug_active;
38 struct delayed_work remove_work;
39 struct tb_bandwidth_group groups[MAX_GROUPS];
40 };
41
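/*
 * Inverse of tb_priv(): the connection manager private data is
 * allocated immediately after struct tb, so stepping back by
 * sizeof(struct tb) recovers the domain pointer.
 */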
42 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
43 {
44 return ((void *)tcm - sizeof(struct tb));
45 }
46
47 struct tb_hotplug_event {
48 struct work_struct work;
49 struct tb *tb;
50 u64 route;
51 u8 port;
52 bool unplug;
53 };
54
55 static void tb_init_bandwidth_groups(struct tb_cm *tcm)
56 {
57 int i;
58
59 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
60 struct tb_bandwidth_group *group = &tcm->groups[i];
61
62 group->tb = tcm_to_tb(tcm);
63 group->index = i + 1;
64 INIT_LIST_HEAD(&group->ports);
65 }
66 }
67
68 static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
69 struct tb_port *in)
70 {
71 if (!group || WARN_ON(in->group))
72 return;
73
74 in->group = group;
75 list_add_tail(&in->group_list, &group->ports);
76
77 tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
78 }
79
80 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
81 {
82 int i;
83
84 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
85 struct tb_bandwidth_group *group = &tcm->groups[i];
86
87 if (list_empty(&group->ports))
88 return group;
89 }
90
91 return NULL;
92 }
93
94 static struct tb_bandwidth_group *
95 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
96 struct tb_port *out)
97 {
98 struct tb_bandwidth_group *group;
99 struct tb_tunnel *tunnel;
100
101 /*
102 * Find all DP tunnels that go through all the same USB4 links
103 * as this one. Because we always set up tunnels the same way we
104 * can just check for the routers at both ends of the tunnels
105 * and if they are the same we have a match.
106 */
107 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
108 if (!tb_tunnel_is_dp(tunnel))
109 continue;
110
111 if (tunnel->src_port->sw == in->sw &&
112 tunnel->dst_port->sw == out->sw) {
113 group = tunnel->src_port->group;
114 if (group) {
115 tb_bandwidth_group_attach_port(group, in);
116 return group;
117 }
118 }
119 }
120
121 /* Pick up next available group then */
122 group = tb_find_free_bandwidth_group(tcm);
123 if (group)
124 tb_bandwidth_group_attach_port(group, in);
125 else
126 tb_port_warn(in, "no available bandwidth groups\n");
127
128 return group;
129 }
130
131 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
132 struct tb_port *out)
133 {
134 if (usb4_dp_port_bandwidth_mode_enabled(in)) {
135 int index, i;
136
137 index = usb4_dp_port_group_id(in);
138 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
139 if (tcm->groups[i].index == index) {
140 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
141 return;
142 }
143 }
144 }
145
146 tb_attach_bandwidth_group(tcm, in, out);
147 }
148
149 static void tb_detach_bandwidth_group(struct tb_port *in)
150 {
151 struct tb_bandwidth_group *group = in->group;
152
153 if (group) {
154 in->group = NULL;
155 list_del_init(&in->group_list);
156
157 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
158 }
159 }
160
161 static void tb_handle_hotplug(struct work_struct *work);
162
163 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
164 {
165 struct tb_hotplug_event *ev;
166
167 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
168 if (!ev)
169 return;
170
171 ev->tb = tb;
172 ev->route = route;
173 ev->port = port;
174 ev->unplug = unplug;
175 INIT_WORK(&ev->work, tb_handle_hotplug);
176 queue_work(tb->wq, &ev->work);
177 }
178
179 /* enumeration & hot plug handling */
180
181 static void tb_add_dp_resources(struct tb_switch *sw)
182 {
183 struct tb_cm *tcm = tb_priv(sw->tb);
184 struct tb_port *port;
185
186 tb_switch_for_each_port(sw, port) {
187 if (!tb_port_is_dpin(port))
188 continue;
189
190 if (!tb_switch_query_dp_resource(sw, port))
191 continue;
192
193 list_add_tail(&port->list, &tcm->dp_resources);
194 tb_port_dbg(port, "DP IN resource available\n");
195 }
196 }
197
198 static void tb_remove_dp_resources(struct tb_switch *sw)
199 {
200 struct tb_cm *tcm = tb_priv(sw->tb);
201 struct tb_port *port, *tmp;
202
203 /* Clear children resources first */
204 tb_switch_for_each_port(sw, port) {
205 if (tb_port_has_remote(port))
206 tb_remove_dp_resources(port->remote->sw);
207 }
208
209 list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
210 if (port->sw == sw) {
211 tb_port_dbg(port, "DP OUT resource unavailable\n");
212 list_del_init(&port->list);
213 }
214 }
215 }
216
217 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
218 {
219 struct tb_cm *tcm = tb_priv(tb);
220 struct tb_port *p;
221
222 list_for_each_entry(p, &tcm->dp_resources, list) {
223 if (p == port)
224 return;
225 }
226
227 tb_port_dbg(port, "DP %s resource available discovered\n",
228 tb_port_is_dpin(port) ? "IN" : "OUT");
229 list_add_tail(&port->list, &tcm->dp_resources);
230 }
231
232 static void tb_discover_dp_resources(struct tb *tb)
233 {
234 struct tb_cm *tcm = tb_priv(tb);
235 struct tb_tunnel *tunnel;
236
237 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
238 if (tb_tunnel_is_dp(tunnel))
239 tb_discover_dp_resource(tb, tunnel->dst_port);
240 }
241 }
242
243 /* Enables CL states up to host router */
244 static int tb_enable_clx(struct tb_switch *sw)
245 {
246 struct tb_cm *tcm = tb_priv(sw->tb);
247 unsigned int clx = TB_CL0S | TB_CL1;
248 const struct tb_tunnel *tunnel;
249 int ret;
250
251 /*
252 * Currently only enable CLx for the first link. This is enough
253 * to allow the CPU to save energy at least on Intel hardware
254 * and makes it slightly simpler to implement. We may change
255 * this in the future to cover the whole topology if it turns
256 * out to be beneficial.
257 */
258 while (sw && sw->config.depth > 1)
259 sw = tb_switch_parent(sw);
260
261 if (!sw)
262 return 0;
263
264 if (sw->config.depth != 1)
265 return 0;
266
267 /*
268 * If we are re-enabling then check if there is an active DMA
269 * tunnel and in that case bail out.
270 */
271 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
272 if (tb_tunnel_is_dma(tunnel)) {
273 if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
274 return 0;
275 }
276 }
277
278 /*
279 * Initially try with CL2. If that's not supported by the
280 * topology try with CL0s and CL1 and then give up.
281 */
282 ret = tb_switch_clx_enable(sw, clx | TB_CL2);
283 if (ret == -EOPNOTSUPP)
284 ret = tb_switch_clx_enable(sw, clx);
285 return ret == -EOPNOTSUPP ? 0 : ret;
286 }
287
288 /* Disables CL states up to the host router */
289 static void tb_disable_clx(struct tb_switch *sw)
290 {
291 do {
292 if (tb_switch_clx_disable(sw) < 0)
293 tb_sw_warn(sw, "failed to disable CL states\n");
294 sw = tb_switch_parent(sw);
295 } while (sw);
296 }
297
298 static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
299 {
300 struct tb_switch *sw;
301
302 sw = tb_to_switch(dev);
303 if (!sw)
304 return 0;
305
306 if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
307 enum tb_switch_tmu_mode mode;
308 int ret;
309
310 if (tb_switch_clx_is_enabled(sw, TB_CL1))
311 mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
312 else
313 mode = TB_SWITCH_TMU_MODE_HIFI_BI;
314
315 ret = tb_switch_tmu_configure(sw, mode);
316 if (ret)
317 return ret;
318
319 return tb_switch_tmu_enable(sw);
320 }
321
322 return 0;
323 }
324
325 static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
326 {
327 struct tb_switch *sw;
328
329 if (!tunnel)
330 return;
331
332 /*
333 * Once first DP tunnel is established we change the TMU
334 * accuracy of first depth child routers (and the host router)
335 * to the highest. This is needed for the DP tunneling to work
336 * but also allows CL0s.
337 *
338 * If both routers are v2 then we don't need to do anything as
339 * they are using enhanced TMU mode that allows all CLx.
340 */
341 sw = tunnel->tb->root_switch;
342 device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
343 }
344
345 static int tb_enable_tmu(struct tb_switch *sw)
346 {
347 int ret;
348
349 /*
350 * If both routers at the end of the link are v2 we simply
351 * enable the enhanced uni-directional mode. That covers all
352 * the CL states. For v1 and before we need to use the normal
353 * rate to allow CL1 (when supported). Otherwise we keep the TMU
354 * running at the highest accuracy.
355 */
356 ret = tb_switch_tmu_configure(sw,
357 TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
358 if (ret == -EOPNOTSUPP) {
359 if (tb_switch_clx_is_enabled(sw, TB_CL1))
360 ret = tb_switch_tmu_configure(sw,
361 TB_SWITCH_TMU_MODE_LOWRES);
362 else
363 ret = tb_switch_tmu_configure(sw,
364 TB_SWITCH_TMU_MODE_HIFI_BI);
365 }
366 if (ret)
367 return ret;
368
369 /* If it is already enabled in correct mode, don't touch it */
370 if (tb_switch_tmu_is_enabled(sw))
371 return 0;
372
373 ret = tb_switch_tmu_disable(sw);
374 if (ret)
375 return ret;
376
377 ret = tb_switch_tmu_post_time(sw);
378 if (ret)
379 return ret;
380
381 return tb_switch_tmu_enable(sw);
382 }
383
384 static void tb_switch_discover_tunnels(struct tb_switch *sw,
385 struct list_head *list,
386 bool alloc_hopids)
387 {
388 struct tb *tb = sw->tb;
389 struct tb_port *port;
390
391 tb_switch_for_each_port(sw, port) {
392 struct tb_tunnel *tunnel = NULL;
393
394 switch (port->config.type) {
395 case TB_TYPE_DP_HDMI_IN:
396 tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
397 tb_increase_tmu_accuracy(tunnel);
398 break;
399
400 case TB_TYPE_PCIE_DOWN:
401 tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
402 break;
403
404 case TB_TYPE_USB3_DOWN:
405 tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
406 break;
407
408 default:
409 break;
410 }
411
412 if (tunnel)
413 list_add_tail(&tunnel->list, list);
414 }
415
416 tb_switch_for_each_port(sw, port) {
417 if (tb_port_has_remote(port)) {
418 tb_switch_discover_tunnels(port->remote->sw, list,
419 alloc_hopids);
420 }
421 }
422 }
423
424 static void tb_discover_tunnels(struct tb *tb)
425 {
426 struct tb_cm *tcm = tb_priv(tb);
427 struct tb_tunnel *tunnel;
428
429 tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
430
431 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
432 if (tb_tunnel_is_pci(tunnel)) {
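/*
 * A PCIe tunnel already present at discovery time was set up
 * by the boot firmware, so mark every router along its path
 * as connected (and authorized) during boot.
 */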
433 struct tb_switch *parent = tunnel->dst_port->sw;
434
435 while (parent != tunnel->src_port->sw) {
436 parent->boot = true;
437 parent = tb_switch_parent(parent);
438 }
439 } else if (tb_tunnel_is_dp(tunnel)) {
440 struct tb_port *in = tunnel->src_port;
441 struct tb_port *out = tunnel->dst_port;
442
443 /* Keep the domain from powering down */
444 pm_runtime_get_sync(&in->sw->dev);
445 pm_runtime_get_sync(&out->sw->dev);
446
447 tb_discover_bandwidth_group(tcm, in, out);
448 }
449 }
450 }
451
452 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
453 {
454 if (tb_switch_is_usb4(port->sw))
455 return usb4_port_configure_xdomain(port, xd);
456 return tb_lc_configure_xdomain(port);
457 }
458
459 static void tb_port_unconfigure_xdomain(struct tb_port *port)
460 {
461 if (tb_switch_is_usb4(port->sw))
462 usb4_port_unconfigure_xdomain(port);
463 else
464 tb_lc_unconfigure_xdomain(port);
465
466 tb_port_enable(port->dual_link_port);
467 }
468
469 static void tb_scan_xdomain(struct tb_port *port)
470 {
471 struct tb_switch *sw = port->sw;
472 struct tb *tb = sw->tb;
473 struct tb_xdomain *xd;
474 u64 route;
475
476 if (!tb_is_xdomain_enabled())
477 return;
478
479 route = tb_downstream_route(port);
480 xd = tb_xdomain_find_by_route(tb, route);
481 if (xd) {
482 tb_xdomain_put(xd);
483 return;
484 }
485
486 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
487 NULL);
488 if (xd) {
489 tb_port_at(route, sw)->xdomain = xd;
490 tb_port_configure_xdomain(port, xd);
491 tb_xdomain_add(xd);
492 }
493 }
494
495 /**
496 * tb_find_unused_port() - return the first inactive port on @sw
497 * @sw: Switch to find the port on
498 * @type: Port type to look for
499 */
500 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
501 enum tb_port_type type)
502 {
503 struct tb_port *port;
504
505 tb_switch_for_each_port(sw, port) {
506 if (tb_is_upstream_port(port))
507 continue;
508 if (port->config.type != type)
509 continue;
510 if (!port->cap_adap)
511 continue;
512 if (tb_port_is_enabled(port))
513 continue;
514 return port;
515 }
516 return NULL;
517 }
518
519 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
520 const struct tb_port *port)
521 {
522 struct tb_port *down;
523
524 down = usb4_switch_map_usb3_down(sw, port);
525 if (down && !tb_usb3_port_is_enabled(down))
526 return down;
527 return NULL;
528 }
529
530 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
531 struct tb_port *src_port,
532 struct tb_port *dst_port)
533 {
534 struct tb_cm *tcm = tb_priv(tb);
535 struct tb_tunnel *tunnel;
536
537 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
538 if (tunnel->type == type &&
539 ((src_port && src_port == tunnel->src_port) ||
540 (dst_port && dst_port == tunnel->dst_port))) {
541 return tunnel;
542 }
543 }
544
545 return NULL;
546 }
547
548 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
549 struct tb_port *src_port,
550 struct tb_port *dst_port)
551 {
552 struct tb_port *port, *usb3_down;
553 struct tb_switch *sw;
554
555 /* Pick the router that is deepest in the topology */
556 if (dst_port->sw->config.depth > src_port->sw->config.depth)
557 sw = dst_port->sw;
558 else
559 sw = src_port->sw;
560
561 /* Can't be the host router */
562 if (sw == tb->root_switch)
563 return NULL;
564
565 /* Find the downstream USB4 port that leads to this router */
566 port = tb_port_at(tb_route(sw), tb->root_switch);
567 /* Find the corresponding host router USB3 downstream port */
568 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
569 if (!usb3_down)
570 return NULL;
571
572 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
573 }
574
575 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
576 struct tb_port *dst_port, int *available_up, int *available_down)
577 {
578 int usb3_consumed_up, usb3_consumed_down, ret;
579 struct tb_cm *tcm = tb_priv(tb);
580 struct tb_tunnel *tunnel;
581 struct tb_port *port;
582
583 tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
584 tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
585 dst_port->port);
586
587 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
588 if (tunnel && tunnel->src_port != src_port &&
589 tunnel->dst_port != dst_port) {
590 ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
591 &usb3_consumed_down);
592 if (ret)
593 return ret;
594 } else {
595 usb3_consumed_up = 0;
596 usb3_consumed_down = 0;
597 }
598
599 /* Maximum possible bandwidth of an asymmetric Gen 4 link is 120 Gb/s */
600 *available_up = *available_down = 120000;
601
602 /* Find the minimum available bandwidth over all links */
603 tb_for_each_port_on_path(src_port, dst_port, port) {
604 int link_speed, link_width, up_bw, down_bw;
605
606 if (!tb_port_is_null(port))
607 continue;
608
609 if (tb_is_upstream_port(port)) {
610 link_speed = port->sw->link_speed;
611 /*
612 * sw->link_width is from upstream perspective
613 * so we use the opposite for downstream of the
614 * host router.
615 */
616 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
617 up_bw = link_speed * 3 * 1000;
618 down_bw = link_speed * 1 * 1000;
619 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
620 up_bw = link_speed * 1 * 1000;
621 down_bw = link_speed * 3 * 1000;
622 } else {
623 up_bw = link_speed * port->sw->link_width * 1000;
624 down_bw = up_bw;
625 }
626 } else {
627 link_speed = tb_port_get_link_speed(port);
628 if (link_speed < 0)
629 return link_speed;
630
631 link_width = tb_port_get_link_width(port);
632 if (link_width < 0)
633 return link_width;
634
635 if (link_width == TB_LINK_WIDTH_ASYM_TX) {
636 up_bw = link_speed * 1 * 1000;
637 down_bw = link_speed * 3 * 1000;
638 } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
639 up_bw = link_speed * 3 * 1000;
640 down_bw = link_speed * 1 * 1000;
641 } else {
642 up_bw = link_speed * link_width * 1000;
643 down_bw = up_bw;
644 }
645 }
646
647 /* Leave 10% guard band */
648 up_bw -= up_bw / 10;
649 down_bw -= down_bw / 10;
650
651 tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
652 down_bw);
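/*
 * For example, a symmetric 2-lane Gen 3 link (20 Gb/s per lane)
 * yields 20 * 2 * 1000 = 40000 Mb/s per direction, or 36000 Mb/s
 * after the 10% guard band, while an asymmetric Gen 4 link with
 * three transmitters gives 40 * 3 * 1000 = 120000 Mb/s in that
 * direction.
 */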
653
654 /*
655 * Find all DP tunnels that cross the port and reduce
656 * their consumed bandwidth from the available.
657 */
658 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
659 int dp_consumed_up, dp_consumed_down;
660
661 if (tb_tunnel_is_invalid(tunnel))
662 continue;
663
664 if (!tb_tunnel_is_dp(tunnel))
665 continue;
666
667 if (!tb_tunnel_port_on_path(tunnel, port))
668 continue;
669
670 /*
671 * Ignore the DP tunnel between src_port and
672 * dst_port because it is the same tunnel and we
673 * may be re-calculating estimated bandwidth.
674 */
675 if (tunnel->src_port == src_port &&
676 tunnel->dst_port == dst_port)
677 continue;
678
679 ret = tb_tunnel_consumed_bandwidth(tunnel,
680 &dp_consumed_up,
681 &dp_consumed_down);
682 if (ret)
683 return ret;
684
685 up_bw -= dp_consumed_up;
686 down_bw -= dp_consumed_down;
687 }
688
689 /*
690 * If USB3 is tunneled from the host router down to the
691 * branch leading to port we need to take USB3 consumed
692 * bandwidth into account regardless whether it actually
693 * crosses the port.
694 */
695 up_bw -= usb3_consumed_up;
696 down_bw -= usb3_consumed_down;
697
698 if (up_bw < *available_up)
699 *available_up = up_bw;
700 if (down_bw < *available_down)
701 *available_down = down_bw;
702 }
703
704 if (*available_up < 0)
705 *available_up = 0;
706 if (*available_down < 0)
707 *available_down = 0;
708
709 return 0;
710 }
711
712 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
713 struct tb_port *src_port,
714 struct tb_port *dst_port)
715 {
716 struct tb_tunnel *tunnel;
717
718 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
719 return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
720 }
721
722 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
723 struct tb_port *dst_port)
724 {
725 int ret, available_up, available_down;
726 struct tb_tunnel *tunnel;
727
728 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
729 if (!tunnel)
730 return;
731
732 tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
733
734 /*
735 * Calculate available bandwidth for the first hop USB3 tunnel.
736 * That determines the whole USB3 bandwidth for this branch.
737 */
738 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
739 &available_up, &available_down);
740 if (ret) {
741 tb_warn(tb, "failed to calculate available bandwidth\n");
742 return;
743 }
744
745 tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
746 available_up, available_down);
747
748 tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
749 }
750
751 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
752 {
753 struct tb_switch *parent = tb_switch_parent(sw);
754 int ret, available_up, available_down;
755 struct tb_port *up, *down, *port;
756 struct tb_cm *tcm = tb_priv(tb);
757 struct tb_tunnel *tunnel;
758
759 if (!tb_acpi_may_tunnel_usb3()) {
760 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
761 return 0;
762 }
763
764 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
765 if (!up)
766 return 0;
767
768 if (!sw->link_usb4)
769 return 0;
770
771 /*
772 * Look up available down port. Since we are chaining it should
773 * be found right above this switch.
774 */
775 port = tb_switch_downstream_port(sw);
776 down = tb_find_usb3_down(parent, port);
777 if (!down)
778 return 0;
779
780 if (tb_route(parent)) {
781 struct tb_port *parent_up;
782 /*
783 * Check first that the parent switch has its upstream USB3
784 * port enabled. Otherwise the chain is not complete and
785 * there is no point setting up a new tunnel.
786 */
787 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
788 if (!parent_up || !tb_port_is_enabled(parent_up))
789 return 0;
790
791 /* Make all unused bandwidth available for the new tunnel */
792 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
793 if (ret)
794 return ret;
795 }
796
797 ret = tb_available_bandwidth(tb, down, up, &available_up,
798 &available_down);
799 if (ret)
800 goto err_reclaim;
801
802 tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
803 available_up, available_down);
804
805 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
806 available_down);
807 if (!tunnel) {
808 ret = -ENOMEM;
809 goto err_reclaim;
810 }
811
812 if (tb_tunnel_activate(tunnel)) {
813 tb_port_info(up,
814 "USB3 tunnel activation failed, aborting\n");
815 ret = -EIO;
816 goto err_free;
817 }
818
819 list_add_tail(&tunnel->list, &tcm->tunnel_list);
820 if (tb_route(parent))
821 tb_reclaim_usb3_bandwidth(tb, down, up);
822
823 return 0;
824
825 err_free:
826 tb_tunnel_free(tunnel);
827 err_reclaim:
828 if (tb_route(parent))
829 tb_reclaim_usb3_bandwidth(tb, down, up);
830
831 return ret;
832 }
833
834 static int tb_create_usb3_tunnels(struct tb_switch *sw)
835 {
836 struct tb_port *port;
837 int ret;
838
839 if (!tb_acpi_may_tunnel_usb3())
840 return 0;
841
842 if (tb_route(sw)) {
843 ret = tb_tunnel_usb3(sw->tb, sw);
844 if (ret)
845 return ret;
846 }
847
848 tb_switch_for_each_port(sw, port) {
849 if (!tb_port_has_remote(port))
850 continue;
851 ret = tb_create_usb3_tunnels(port->remote->sw);
852 if (ret)
853 return ret;
854 }
855
856 return 0;
857 }
858
859 static void tb_scan_port(struct tb_port *port);
860
861 /*
862 * tb_scan_switch() - scan for and initialize downstream switches
863 */
864 static void tb_scan_switch(struct tb_switch *sw)
865 {
866 struct tb_port *port;
867
868 pm_runtime_get_sync(&sw->dev);
869
870 tb_switch_for_each_port(sw, port)
871 tb_scan_port(port);
872
873 pm_runtime_mark_last_busy(&sw->dev);
874 pm_runtime_put_autosuspend(&sw->dev);
875 }
876
877 /*
878 * tb_scan_port() - check for and initialize switches below port
879 */
880 static void tb_scan_port(struct tb_port *port)
881 {
882 struct tb_cm *tcm = tb_priv(port->sw->tb);
883 struct tb_port *upstream_port;
884 bool discovery = false;
885 struct tb_switch *sw;
886
887 if (tb_is_upstream_port(port))
888 return;
889
890 if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
891 !tb_dp_port_is_enabled(port)) {
892 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
893 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
894 false);
895 return;
896 }
897
898 if (port->config.type != TB_TYPE_PORT)
899 return;
900 if (port->dual_link_port && port->link_nr)
901 return; /*
902 * Downstream switch is reachable through two ports.
903 * Only scan on the primary port (link_nr == 0).
904 */
905
906 if (port->usb4)
907 pm_runtime_get_sync(&port->usb4->dev);
908
909 if (tb_wait_for_port(port, false) <= 0)
910 goto out_rpm_put;
911 if (port->remote) {
912 tb_port_dbg(port, "port already has a remote\n");
913 goto out_rpm_put;
914 }
915
916 tb_retimer_scan(port, true);
917
918 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
919 tb_downstream_route(port));
920 if (IS_ERR(sw)) {
921 /*
922 * If there is an error accessing the connected switch
923 * it may be connected to another domain. Also we allow
924 * the other domain to be connected to a max depth switch.
925 */
926 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
927 tb_scan_xdomain(port);
928 goto out_rpm_put;
929 }
930
931 if (tb_switch_configure(sw)) {
932 tb_switch_put(sw);
933 goto out_rpm_put;
934 }
935
936 /*
937 * If there was previously another domain connected remove it
938 * first.
939 */
940 if (port->xdomain) {
941 tb_xdomain_remove(port->xdomain);
942 tb_port_unconfigure_xdomain(port);
943 port->xdomain = NULL;
944 }
945
946 /*
947 * Do not send uevents until we have discovered all existing
948 * tunnels and know which switches were authorized already by
949 * the boot firmware.
950 */
951 if (!tcm->hotplug_active) {
952 dev_set_uevent_suppress(&sw->dev, true);
953 discovery = true;
954 }
955
956 /*
957 * At the moment runtime PM is supported only on Thunderbolt 2
958 * and beyond (devices with LC).
959 */
960 sw->rpm = sw->generation > 1;
961
962 if (tb_switch_add(sw)) {
963 tb_switch_put(sw);
964 goto out_rpm_put;
965 }
966
967 /* Link the switches using both links if available */
968 upstream_port = tb_upstream_port(sw);
969 port->remote = upstream_port;
970 upstream_port->remote = port;
971 if (port->dual_link_port && upstream_port->dual_link_port) {
972 port->dual_link_port->remote = upstream_port->dual_link_port;
973 upstream_port->dual_link_port->remote = port->dual_link_port;
974 }
975
976 /* Enable lane bonding if supported */
977 tb_switch_lane_bonding_enable(sw);
978 /* Set the link configured */
979 tb_switch_configure_link(sw);
980 /*
981 * CL0s and CL1 are enabled and supported together.
982 * Silently ignore CLx enabling in case CLx is not supported.
983 */
984 if (discovery)
985 tb_sw_dbg(sw, "discovery, not touching CL states\n");
986 else if (tb_enable_clx(sw))
987 tb_sw_warn(sw, "failed to enable CL states\n");
988
989 if (tb_enable_tmu(sw))
990 tb_sw_warn(sw, "failed to enable TMU\n");
991
992 /*
993 * Configuration valid needs to be set after the TMU has been
994 * enabled for the upstream port of the router so we do it here.
995 */
996 tb_switch_configuration_valid(sw);
997
998 /* Scan upstream retimers */
999 tb_retimer_scan(upstream_port, true);
1000
1001 /*
1002 * Create USB 3.x tunnels only when the switch is plugged to the
1003 * domain. This is because we scan the domain also during discovery
1004 * and want to discover existing USB 3.x tunnels before we create
1005 * any new.
1006 */
1007 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1008 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
1009
1010 tb_add_dp_resources(sw);
1011 tb_scan_switch(sw);
1012
1013 out_rpm_put:
1014 if (port->usb4) {
1015 pm_runtime_mark_last_busy(&port->usb4->dev);
1016 pm_runtime_put_autosuspend(&port->usb4->dev);
1017 }
1018 }
1019
1020 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
1021 {
1022 struct tb_port *src_port, *dst_port;
1023 struct tb *tb;
1024
1025 if (!tunnel)
1026 return;
1027
1028 tb_tunnel_deactivate(tunnel);
1029 list_del(&tunnel->list);
1030
1031 tb = tunnel->tb;
1032 src_port = tunnel->src_port;
1033 dst_port = tunnel->dst_port;
1034
1035 switch (tunnel->type) {
1036 case TB_TUNNEL_DP:
1037 tb_detach_bandwidth_group(src_port);
1038 /*
1039 * In case of DP tunnel make sure the DP IN resource is
1040 * deallocated properly.
1041 */
1042 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
1043 /* Now we can allow the domain to runtime suspend again */
1044 pm_runtime_mark_last_busy(&dst_port->sw->dev);
1045 pm_runtime_put_autosuspend(&dst_port->sw->dev);
1046 pm_runtime_mark_last_busy(&src_port->sw->dev);
1047 pm_runtime_put_autosuspend(&src_port->sw->dev);
1048 fallthrough;
1049
1050 case TB_TUNNEL_USB3:
1051 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
1052 break;
1053
1054 default:
1055 /*
1056 * PCIe and DMA tunnels do not consume guaranteed
1057 * bandwidth.
1058 */
1059 break;
1060 }
1061
1062 tb_tunnel_free(tunnel);
1063 }
1064
1065 /*
1066 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
1067 */
1068 static void tb_free_invalid_tunnels(struct tb *tb)
1069 {
1070 struct tb_cm *tcm = tb_priv(tb);
1071 struct tb_tunnel *tunnel;
1072 struct tb_tunnel *n;
1073
1074 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1075 if (tb_tunnel_is_invalid(tunnel))
1076 tb_deactivate_and_free_tunnel(tunnel);
1077 }
1078 }
1079
1080 /*
1081 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
1082 */
1083 static void tb_free_unplugged_children(struct tb_switch *sw)
1084 {
1085 struct tb_port *port;
1086
1087 tb_switch_for_each_port(sw, port) {
1088 if (!tb_port_has_remote(port))
1089 continue;
1090
1091 if (port->remote->sw->is_unplugged) {
1092 tb_retimer_remove_all(port);
1093 tb_remove_dp_resources(port->remote->sw);
1094 tb_switch_unconfigure_link(port->remote->sw);
1095 tb_switch_lane_bonding_disable(port->remote->sw);
1096 tb_switch_remove(port->remote->sw);
1097 port->remote = NULL;
1098 if (port->dual_link_port)
1099 port->dual_link_port->remote = NULL;
1100 } else {
1101 tb_free_unplugged_children(port->remote->sw);
1102 }
1103 }
1104 }
1105
1106 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
1107 const struct tb_port *port)
1108 {
1109 struct tb_port *down = NULL;
1110
1111 /*
1112 * To keep plugging devices consistently in the same PCIe
1113 * hierarchy, do mapping here for switch downstream PCIe ports.
1114 */
1115 if (tb_switch_is_usb4(sw)) {
1116 down = usb4_switch_map_pcie_down(sw, port);
1117 } else if (!tb_route(sw)) {
1118 int phy_port = tb_phy_port_from_link(port->port);
1119 int index;
1120
1121 /*
1122 * Hard-coded Thunderbolt port to PCIe down port mapping
1123 * per controller.
1124 */
1125 if (tb_switch_is_cactus_ridge(sw) ||
1126 tb_switch_is_alpine_ridge(sw))
1127 index = !phy_port ? 6 : 7;
1128 else if (tb_switch_is_falcon_ridge(sw))
1129 index = !phy_port ? 6 : 8;
1130 else if (tb_switch_is_titan_ridge(sw))
1131 index = !phy_port ? 8 : 9;
1132 else
1133 goto out;
1134
1135 /* Validate the hard-coding */
1136 if (WARN_ON(index > sw->config.max_port_number))
1137 goto out;
1138
1139 down = &sw->ports[index];
1140 }
1141
1142 if (down) {
1143 if (WARN_ON(!tb_port_is_pcie_down(down)))
1144 goto out;
1145 if (tb_pci_port_is_enabled(down))
1146 goto out;
1147
1148 return down;
1149 }
1150
1151 out:
1152 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
1153 }
1154
1155 static void
1156 tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1157 {
1158 struct tb_tunnel *first_tunnel;
1159 struct tb *tb = group->tb;
1160 struct tb_port *in;
1161 int ret;
1162
1163 tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1164 group->index);
1165
1166 first_tunnel = NULL;
1167 list_for_each_entry(in, &group->ports, group_list) {
1168 int estimated_bw, estimated_up, estimated_down;
1169 struct tb_tunnel *tunnel;
1170 struct tb_port *out;
1171
1172 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1173 continue;
1174
1175 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1176 if (WARN_ON(!tunnel))
1177 break;
1178
1179 if (!first_tunnel) {
1180 /*
1181 * Since USB3 bandwidth is shared by all DP
1182 * tunnels under the host router USB4 port, even
1183 * if they do not begin from the host router, we
1184 * can release USB3 bandwidth just once and not
1185 * for each tunnel separately.
1186 */
1187 first_tunnel = tunnel;
1188 ret = tb_release_unused_usb3_bandwidth(tb,
1189 first_tunnel->src_port, first_tunnel->dst_port);
1190 if (ret) {
1191 tb_port_warn(in,
1192 "failed to release unused bandwidth\n");
1193 break;
1194 }
1195 }
1196
1197 out = tunnel->dst_port;
1198 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1199 &estimated_down);
1200 if (ret) {
1201 tb_port_warn(in,
1202 "failed to re-calculate estimated bandwidth\n");
1203 break;
1204 }
1205
1206 /*
1207 * Estimated bandwidth includes:
1208 * - already allocated bandwidth for the DP tunnel
1209 * - available bandwidth along the path
1210 * - bandwidth allocated for USB 3.x but not used.
1211 */
1212 tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
1213 estimated_up, estimated_down);
1214
1215 if (in->sw->config.depth < out->sw->config.depth)
1216 estimated_bw = estimated_down;
1217 else
1218 estimated_bw = estimated_up;
1219
1220 if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
1221 tb_port_warn(in, "failed to update estimated bandwidth\n");
1222 }
1223
1224 if (first_tunnel)
1225 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1226 first_tunnel->dst_port);
1227
1228 tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1229 }
1230
1231 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1232 {
1233 struct tb_cm *tcm = tb_priv(tb);
1234 int i;
1235
1236 tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1237
1238 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1239 struct tb_bandwidth_group *group = &tcm->groups[i];
1240
1241 if (!list_empty(&group->ports))
1242 tb_recalc_estimated_bandwidth_for_group(group);
1243 }
1244
1245 tb_dbg(tb, "bandwidth re-calculation done\n");
1246 }
1247
1248 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1249 {
1250 struct tb_port *host_port, *port;
1251 struct tb_cm *tcm = tb_priv(tb);
1252
1253 host_port = tb_route(in->sw) ?
1254 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1255
1256 list_for_each_entry(port, &tcm->dp_resources, list) {
1257 if (!tb_port_is_dpout(port))
1258 continue;
1259
1260 if (tb_port_is_enabled(port)) {
1261 tb_port_dbg(port, "DP OUT in use\n");
1262 continue;
1263 }
1264
1265 tb_port_dbg(port, "DP OUT available\n");
1266
1267 /*
1268 * Keep the DP tunnel under the topology starting from
1269 * the same host router downstream port.
1270 */
1271 if (host_port && tb_route(port->sw)) {
1272 struct tb_port *p;
1273
1274 p = tb_port_at(tb_route(port->sw), tb->root_switch);
1275 if (p != host_port)
1276 continue;
1277 }
1278
1279 return port;
1280 }
1281
1282 return NULL;
1283 }
1284
1285 static void tb_tunnel_dp(struct tb *tb)
1286 {
1287 int available_up, available_down, ret, link_nr;
1288 struct tb_cm *tcm = tb_priv(tb);
1289 struct tb_port *port, *in, *out;
1290 struct tb_tunnel *tunnel;
1291
1292 if (!tb_acpi_may_tunnel_dp()) {
1293 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1294 return;
1295 }
1296
1297 /*
1298 * Find pair of inactive DP IN and DP OUT adapters and then
1299 * establish a DP tunnel between them.
1300 */
1301 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1302
1303 in = NULL;
1304 out = NULL;
1305 list_for_each_entry(port, &tcm->dp_resources, list) {
1306 if (!tb_port_is_dpin(port))
1307 continue;
1308
1309 if (tb_port_is_enabled(port)) {
1310 tb_port_dbg(port, "DP IN in use\n");
1311 continue;
1312 }
1313
1314 tb_port_dbg(port, "DP IN available\n");
1315
1316 out = tb_find_dp_out(tb, port);
1317 if (out) {
1318 in = port;
1319 break;
1320 }
1321 }
1322
1323 if (!in) {
1324 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1325 return;
1326 }
1327 if (!out) {
1328 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
1329 return;
1330 }
1331
1332 /*
1333 * This is only applicable to links that are not bonded (so
1334 * when Thunderbolt 1 hardware is involved somewhere in the
1335 * topology). For these try to share the DP bandwidth between
1336 * the two lanes.
1337 */
1338 link_nr = 1;
1339 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1340 if (tb_tunnel_is_dp(tunnel)) {
1341 link_nr = 0;
1342 break;
1343 }
1344 }
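/*
 * In other words, the first DP tunnel goes to lane adapter 1 and
 * any additional tunnel falls back to lane adapter 0, spreading
 * the streams over both lanes of a non-bonded link.
 */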
1345
1346 /*
1347 * DP stream needs the domain to be active so runtime resume
1348 * both ends of the tunnel.
1349 *
1350 * This should bring the routers in the middle active as well
1351 * and keeps the domain from runtime suspending while the DP
1352 * tunnel is active.
1353 */
1354 pm_runtime_get_sync(&in->sw->dev);
1355 pm_runtime_get_sync(&out->sw->dev);
1356
1357 if (tb_switch_alloc_dp_resource(in->sw, in)) {
1358 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
1359 goto err_rpm_put;
1360 }
1361
1362 if (!tb_attach_bandwidth_group(tcm, in, out))
1363 goto err_dealloc_dp;
1364
1365 /* Make all unused USB3 bandwidth available for the new DP tunnel */
1366 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1367 if (ret) {
1368 tb_warn(tb, "failed to release unused bandwidth\n");
1369 goto err_detach_group;
1370 }
1371
1372 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
1373 if (ret)
1374 goto err_reclaim_usb;
1375
1376 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1377 available_up, available_down);
1378
1379 tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1380 available_down);
1381 if (!tunnel) {
1382 tb_port_dbg(out, "could not allocate DP tunnel\n");
1383 goto err_reclaim_usb;
1384 }
1385
1386 if (tb_tunnel_activate(tunnel)) {
1387 tb_port_info(out, "DP tunnel activation failed, aborting\n");
1388 goto err_free;
1389 }
1390
1391 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1392 tb_reclaim_usb3_bandwidth(tb, in, out);
1393
1394 /* Update the domain with the new bandwidth estimation */
1395 tb_recalc_estimated_bandwidth(tb);
1396
1397 /*
1398 * When a DP tunnel exists, change the TMU mode of the host
1399 * router's first-depth children to HiFi so that CL0s works.
1400 */
1401 tb_increase_tmu_accuracy(tunnel);
1402 return;
1403
1404 err_free:
1405 tb_tunnel_free(tunnel);
1406 err_reclaim_usb:
1407 tb_reclaim_usb3_bandwidth(tb, in, out);
1408 err_detach_group:
1409 tb_detach_bandwidth_group(in);
1410 err_dealloc_dp:
1411 tb_switch_dealloc_dp_resource(in->sw, in);
1412 err_rpm_put:
1413 pm_runtime_mark_last_busy(&out->sw->dev);
1414 pm_runtime_put_autosuspend(&out->sw->dev);
1415 pm_runtime_mark_last_busy(&in->sw->dev);
1416 pm_runtime_put_autosuspend(&in->sw->dev);
1417 }
1418
1419 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
1420 {
1421 struct tb_port *in, *out;
1422 struct tb_tunnel *tunnel;
1423
1424 if (tb_port_is_dpin(port)) {
1425 tb_port_dbg(port, "DP IN resource unavailable\n");
1426 in = port;
1427 out = NULL;
1428 } else {
1429 tb_port_dbg(port, "DP OUT resource unavailable\n");
1430 in = NULL;
1431 out = port;
1432 }
1433
1434 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1435 tb_deactivate_and_free_tunnel(tunnel);
1436 list_del_init(&port->list);
1437
1438 /*
1439 * See if there is another DP OUT port that can be used to
1440 * create another tunnel.
1441 */
1442 tb_recalc_estimated_bandwidth(tb);
1443 tb_tunnel_dp(tb);
1444 }
1445
1446 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1447 {
1448 struct tb_cm *tcm = tb_priv(tb);
1449 struct tb_port *p;
1450
1451 if (tb_port_is_enabled(port))
1452 return;
1453
1454 list_for_each_entry(p, &tcm->dp_resources, list) {
1455 if (p == port)
1456 return;
1457 }
1458
1459 tb_port_dbg(port, "DP %s resource available\n",
1460 tb_port_is_dpin(port) ? "IN" : "OUT");
1461 list_add_tail(&port->list, &tcm->dp_resources);
1462
1463 /* Look for suitable DP IN <-> DP OUT pairs now */
1464 tb_tunnel_dp(tb);
1465 }
1466
1467 static void tb_disconnect_and_release_dp(struct tb *tb)
1468 {
1469 struct tb_cm *tcm = tb_priv(tb);
1470 struct tb_tunnel *tunnel, *n;
1471
1472 /*
1473 * Tear down all DP tunnels and release their resources. They
1474 * will be re-established after resume based on plug events.
1475 */
1476 list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1477 if (tb_tunnel_is_dp(tunnel))
1478 tb_deactivate_and_free_tunnel(tunnel);
1479 }
1480
1481 while (!list_empty(&tcm->dp_resources)) {
1482 struct tb_port *port;
1483
1484 port = list_first_entry(&tcm->dp_resources,
1485 struct tb_port, list);
1486 list_del_init(&port->list);
1487 }
1488 }
1489
1490 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1491 {
1492 struct tb_tunnel *tunnel;
1493 struct tb_port *up;
1494
1495 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1496 if (WARN_ON(!up))
1497 return -ENODEV;
1498
1499 tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1500 if (WARN_ON(!tunnel))
1501 return -ENODEV;
1502
1503 tb_switch_xhci_disconnect(sw);
1504
1505 tb_tunnel_deactivate(tunnel);
1506 list_del(&tunnel->list);
1507 tb_tunnel_free(tunnel);
1508 return 0;
1509 }
1510
1511 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1512 {
1513 struct tb_port *up, *down, *port;
1514 struct tb_cm *tcm = tb_priv(tb);
1515 struct tb_tunnel *tunnel;
1516
1517 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1518 if (!up)
1519 return 0;
1520
1521 /*
1522 * Look up available down port. Since we are chaining it should
1523 * be found right above this switch.
1524 */
1525 port = tb_switch_downstream_port(sw);
1526 down = tb_find_pcie_down(tb_switch_parent(sw), port);
1527 if (!down)
1528 return 0;
1529
1530 tunnel = tb_tunnel_alloc_pci(tb, up, down);
1531 if (!tunnel)
1532 return -ENOMEM;
1533
1534 if (tb_tunnel_activate(tunnel)) {
1535 tb_port_info(up,
1536 "PCIe tunnel activation failed, aborting\n");
1537 tb_tunnel_free(tunnel);
1538 return -EIO;
1539 }
1540
1541 /*
1542 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
1543 * here.
1544 */
1545 if (tb_switch_pcie_l1_enable(sw))
1546 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
1547
1548 if (tb_switch_xhci_connect(sw))
1549 tb_sw_warn(sw, "failed to connect xHCI\n");
1550
1551 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1552 return 0;
1553 }
1554
1555 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1556 int transmit_path, int transmit_ring,
1557 int receive_path, int receive_ring)
1558 {
1559 struct tb_cm *tcm = tb_priv(tb);
1560 struct tb_port *nhi_port, *dst_port;
1561 struct tb_tunnel *tunnel;
1562 struct tb_switch *sw;
1563 int ret;
1564
1565 sw = tb_to_switch(xd->dev.parent);
1566 dst_port = tb_port_at(xd->route, sw);
1567 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1568
1569 mutex_lock(&tb->lock);
1570
1571 /*
1572 * When tunneling DMA paths the link should not enter CL states
1573 * so disable them now.
1574 */
1575 tb_disable_clx(sw);
1576
1577 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1578 transmit_ring, receive_path, receive_ring);
1579 if (!tunnel) {
1580 ret = -ENOMEM;
1581 goto err_clx;
1582 }
1583
1584 if (tb_tunnel_activate(tunnel)) {
1585 tb_port_info(nhi_port,
1586 "DMA tunnel activation failed, aborting\n");
1587 ret = -EIO;
1588 goto err_free;
1589 }
1590
1591 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1592 mutex_unlock(&tb->lock);
1593 return 0;
1594
1595 err_free:
1596 tb_tunnel_free(tunnel);
1597 err_clx:
1598 tb_enable_clx(sw);
1599 mutex_unlock(&tb->lock);
1600
1601 return ret;
1602 }
1603
1604 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1605 int transmit_path, int transmit_ring,
1606 int receive_path, int receive_ring)
1607 {
1608 struct tb_cm *tcm = tb_priv(tb);
1609 struct tb_port *nhi_port, *dst_port;
1610 struct tb_tunnel *tunnel, *n;
1611 struct tb_switch *sw;
1612
1613 sw = tb_to_switch(xd->dev.parent);
1614 dst_port = tb_port_at(xd->route, sw);
1615 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1616
1617 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1618 if (!tb_tunnel_is_dma(tunnel))
1619 continue;
1620 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1621 continue;
1622
1623 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1624 receive_path, receive_ring))
1625 tb_deactivate_and_free_tunnel(tunnel);
1626 }
1627
1628 /*
1629 * Try to re-enable CL states now, it is OK if this fails
1630 * because we may still have another DMA tunnel active through
1631 * the same host router USB4 downstream port.
1632 */
1633 tb_enable_clx(sw);
1634 }
1635
1636 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1637 int transmit_path, int transmit_ring,
1638 int receive_path, int receive_ring)
1639 {
1640 if (!xd->is_unplugged) {
1641 mutex_lock(&tb->lock);
1642 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1643 transmit_ring, receive_path,
1644 receive_ring);
1645 mutex_unlock(&tb->lock);
1646 }
1647 return 0;
1648 }
1649
1650 /* hotplug handling */
1651
1652 /*
1653 * tb_handle_hotplug() - handle hotplug event
1654 *
1655 * Executes on tb->wq.
1656 */
1657 static void tb_handle_hotplug(struct work_struct *work)
1658 {
1659 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1660 struct tb *tb = ev->tb;
1661 struct tb_cm *tcm = tb_priv(tb);
1662 struct tb_switch *sw;
1663 struct tb_port *port;
1664
1665 /* Bring the domain back from sleep if it was suspended */
1666 pm_runtime_get_sync(&tb->dev);
1667
1668 mutex_lock(&tb->lock);
1669 if (!tcm->hotplug_active)
1670 goto out; /* during init, suspend or shutdown */
1671
1672 sw = tb_switch_find_by_route(tb, ev->route);
1673 if (!sw) {
1674 tb_warn(tb,
1675 "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
1676 ev->route, ev->port, ev->unplug);
1677 goto out;
1678 }
1679 if (ev->port > sw->config.max_port_number) {
1680 tb_warn(tb,
1681 "hotplug event from non existent port %llx:%x (unplug: %d)\n",
1682 ev->route, ev->port, ev->unplug);
1683 goto put_sw;
1684 }
1685 port = &sw->ports[ev->port];
1686 if (tb_is_upstream_port(port)) {
1687 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1688 ev->route, ev->port, ev->unplug);
1689 goto put_sw;
1690 }
1691
1692 pm_runtime_get_sync(&sw->dev);
1693
1694 if (ev->unplug) {
1695 tb_retimer_remove_all(port);
1696
1697 if (tb_port_has_remote(port)) {
1698 tb_port_dbg(port, "switch unplugged\n");
1699 tb_sw_set_unplugged(port->remote->sw);
1700 tb_free_invalid_tunnels(tb);
1701 tb_remove_dp_resources(port->remote->sw);
1702 tb_switch_tmu_disable(port->remote->sw);
1703 tb_switch_unconfigure_link(port->remote->sw);
1704 tb_switch_lane_bonding_disable(port->remote->sw);
1705 tb_switch_remove(port->remote->sw);
1706 port->remote = NULL;
1707 if (port->dual_link_port)
1708 port->dual_link_port->remote = NULL;
1709 /* Maybe we can create another DP tunnel */
1710 tb_recalc_estimated_bandwidth(tb);
1711 tb_tunnel_dp(tb);
1712 } else if (port->xdomain) {
1713 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
1714
1715 tb_port_dbg(port, "xdomain unplugged\n");
1716 /*
1717 * Service drivers are unbound during
1718 * tb_xdomain_remove() so setting XDomain as
1719 * unplugged here prevents deadlock if they call
1720 * tb_xdomain_disable_paths(). We will tear down
1721 * all the tunnels below.
1722 */
1723 xd->is_unplugged = true;
1724 tb_xdomain_remove(xd);
1725 port->xdomain = NULL;
1726 __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
1727 tb_xdomain_put(xd);
1728 tb_port_unconfigure_xdomain(port);
1729 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1730 tb_dp_resource_unavailable(tb, port);
1731 } else if (!port->port) {
1732 tb_sw_dbg(sw, "xHCI disconnect request\n");
1733 tb_switch_xhci_disconnect(sw);
1734 } else {
1735 tb_port_dbg(port,
1736 "got unplug event for disconnected port, ignoring\n");
1737 }
1738 } else if (port->remote) {
1739 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
1740 } else if (!port->port && sw->authorized) {
1741 tb_sw_dbg(sw, "xHCI connect request\n");
1742 tb_switch_xhci_connect(sw);
1743 } else {
1744 if (tb_port_is_null(port)) {
1745 tb_port_dbg(port, "hotplug: scanning\n");
1746 tb_scan_port(port);
1747 if (!port->remote)
1748 tb_port_dbg(port, "hotplug: no switch found\n");
1749 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1750 tb_dp_resource_available(tb, port);
1751 }
1752 }
1753
1754 pm_runtime_mark_last_busy(&sw->dev);
1755 pm_runtime_put_autosuspend(&sw->dev);
1756
1757 put_sw:
1758 tb_switch_put(sw);
1759 out:
1760 mutex_unlock(&tb->lock);
1761
1762 pm_runtime_mark_last_busy(&tb->dev);
1763 pm_runtime_put_autosuspend(&tb->dev);
1764
1765 kfree(ev);
1766 }
1767
1768 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
1769 int *requested_down)
1770 {
1771 int allocated_up, allocated_down, available_up, available_down, ret;
1772 int requested_up_corrected, requested_down_corrected, granularity;
1773 int max_up, max_down, max_up_rounded, max_down_rounded;
1774 struct tb *tb = tunnel->tb;
1775 struct tb_port *in, *out;
1776
1777 ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
1778 if (ret)
1779 return ret;
1780
1781 in = tunnel->src_port;
1782 out = tunnel->dst_port;
1783
1784 tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
1785 allocated_up, allocated_down);
1786
1787 /*
1788 * If we get rounded up request from graphics side, say HBR2 x 4
1789 * that is 17500 instead of 17280 (this is because of the
1790 * granularity), we allow it too. Here the graphics has already
1791 * negotiated with the DPRX the maximum possible rates (which is
1792 * 17280 in this case).
1793 *
1794 * Since the link cannot go higher than 17280 we use that in our
1795 * calculations but the DP IN adapter Allocated BW write must be
1796 * the same value (17500) otherwise the adapter will mark it as
1797 * failed for graphics.
1798 */
1799 ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
1800 if (ret)
1801 return ret;
1802
1803 ret = usb4_dp_port_granularity(in);
1804 if (ret < 0)
1805 return ret;
1806 granularity = ret;
1807
1808 max_up_rounded = roundup(max_up, granularity);
1809 max_down_rounded = roundup(max_down, granularity);
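/*
 * With the 17280 Mb/s maximum from the example above and e.g. a
 * 250 Mb/s granularity this rounds up to 17500 Mb/s, matching the
 * value the graphics side may request.
 */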
1810
1811 /*
1812 * This will "fix" the request down to the maximum supported
1813 * rate * lanes if it is at the maximum rounded up level.
1814 */
1815 requested_up_corrected = *requested_up;
1816 if (requested_up_corrected == max_up_rounded)
1817 requested_up_corrected = max_up;
1818 else if (requested_up_corrected < 0)
1819 requested_up_corrected = 0;
1820 requested_down_corrected = *requested_down;
1821 if (requested_down_corrected == max_down_rounded)
1822 requested_down_corrected = max_down;
1823 else if (requested_down_corrected < 0)
1824 requested_down_corrected = 0;
1825
1826 tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
1827 requested_up_corrected, requested_down_corrected);
1828
1829 if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
1830 (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
1831 tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
1832 requested_up_corrected, requested_down_corrected,
1833 max_up_rounded, max_down_rounded);
1834 return -ENOBUFS;
1835 }
1836
1837 if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
1838 (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
1839 /*
1840 * If requested bandwidth is less or equal than what is
1841 * currently allocated to that tunnel we simply change
1842 * the reservation of the tunnel. Since all the tunnels
1843 * going out from the same USB4 port are in the same
1844 * group the released bandwidth will be taken into
1845 * account for the other tunnels automatically below.
1846 */
1847 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
1848 requested_down);
1849 }
1850
1851 /*
1852 * More bandwidth is requested. Release all the potential
1853 * bandwidth from USB3 first.
1854 */
1855 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1856 if (ret)
1857 return ret;
1858
1859 /*
1860 * Then go over all tunnels that cross the same USB4 ports (they
1861 * are also in the same group but we use the same function here
1862 * that we use with the normal bandwidth allocation).
1863 */
1864 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
1865 if (ret)
1866 goto reclaim;
1867
1868 tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
1869 available_up, available_down);
1870
1871 if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
1872 (*requested_down >= 0 && available_down >= requested_down_corrected)) {
1873 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
1874 requested_down);
1875 } else {
1876 ret = -ENOBUFS;
1877 }
1878
1879 reclaim:
1880 tb_reclaim_usb3_bandwidth(tb, in, out);
1881 return ret;
1882 }
1883
1884 static void tb_handle_dp_bandwidth_request(struct work_struct *work)
1885 {
1886 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1887 int requested_bw, requested_up, requested_down, ret;
1888 struct tb_port *in, *out;
1889 struct tb_tunnel *tunnel;
1890 struct tb *tb = ev->tb;
1891 struct tb_cm *tcm = tb_priv(tb);
1892 struct tb_switch *sw;
1893
1894 pm_runtime_get_sync(&tb->dev);
1895
1896 mutex_lock(&tb->lock);
1897 if (!tcm->hotplug_active)
1898 goto unlock;
1899
1900 sw = tb_switch_find_by_route(tb, ev->route);
1901 if (!sw) {
1902 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
1903 ev->route);
1904 goto unlock;
1905 }
1906
1907 in = &sw->ports[ev->port];
1908 if (!tb_port_is_dpin(in)) {
1909 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
1910 goto put_sw;
1911 }
1912
1913 tb_port_dbg(in, "handling bandwidth allocation request\n");
1914
1915 if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
1916 tb_port_warn(in, "bandwidth allocation mode not enabled\n");
1917 goto put_sw;
1918 }
1919
1920 ret = usb4_dp_port_requested_bandwidth(in);
1921 if (ret < 0) {
1922 if (ret == -ENODATA)
1923 tb_port_dbg(in, "no bandwidth request active\n");
1924 else
1925 tb_port_warn(in, "failed to read requested bandwidth\n");
1926 goto put_sw;
1927 }
1928 requested_bw = ret;
1929
1930 tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
1931
1932 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1933 if (!tunnel) {
1934 tb_port_warn(in, "failed to find tunnel\n");
1935 goto put_sw;
1936 }
1937
1938 out = tunnel->dst_port;
1939
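	/*
	 * The requested bandwidth is a single number, so map it to the
	 * upstream or downstream direction depending on which way the
	 * DP tunnel runs: DP IN closer to the host than DP OUT means
	 * the stream flows downstream, otherwise it flows upstream.
	 */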
	if (in->sw->config.depth < out->sw->config.depth) {
		requested_up = -1;
		requested_down = requested_bw;
	} else {
		requested_up = requested_bw;
		requested_down = -1;
	}

	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
	if (ret) {
		if (ret == -ENOBUFS)
			tb_port_warn(in, "not enough bandwidth available\n");
		else
			tb_port_warn(in, "failed to change bandwidth allocation\n");
	} else {
		tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
			    requested_up, requested_down);

		/* Update other clients about the allocation change */
		tb_recalc_estimated_bandwidth(tb);
	}

put_sw:
	tb_switch_put(sw);
unlock:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

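/*
 * Handling a bandwidth request needs tb->lock and may runtime resume
 * the domain, so it is deferred to the domain workqueue instead of
 * being handled directly in the control channel callback.
 */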
static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
	queue_work(tb->wq, &ev->work);
}

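/*
 * Known notifications are acked back to the sending router; only a DP
 * bandwidth request (TB_CFG_ERROR_DP_BW) needs further handling, which
 * is queued to the domain workqueue above.
 */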
static void tb_handle_notification(struct tb *tb, u64 route,
				   const struct cfg_error_pkg *error)
{
	switch (error->error) {
	case TB_CFG_ERROR_PCIE_WAKE:
	case TB_CFG_ERROR_DP_CON_CHANGE:
	case TB_CFG_ERROR_DPTX_DISCOVERY:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		break;

	case TB_CFG_ERROR_DP_BW:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		tb_queue_dp_bandwidth_request(tb, route, error->port);
		break;

	default:
		/* Ignore for now */
		break;
	}
}

/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates hotplug events to tb_handle_hotplug() (via tb_queue_hotplug())
 * and error notifications to tb_handle_notification().
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route = tb_cfg_get_route(&pkg->header);

	switch (type) {
	case TB_CFG_PKG_ERROR:
		tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
		return;
	case TB_CFG_PKG_EVENT:
		break;
	default:
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware, which is not
	 * available in native mode, so disable firmware upgrade of the
	 * root switch.
	 *
	 * However, USB4 routers support NVM firmware upgrade if they
	 * implement the necessary router operations.
	 */
	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/*
	 * To support highest CLx state, we set host router's TMU to
	 * Normal mode.
	 */
	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb);
	/* Add DP resources from the DP tunnels created by the boot firmware */
	tb_discover_dp_resources(tb);
	/*
	 * If the boot firmware did not create USB 3.x tunnels, create
	 * them now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

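/*
 * Walk the topology depth first and bring each connected router back to
 * the expected state after resume: CL states and TMU re-enabled, lane
 * bonding and link configuration restored, and XDomain links
 * re-configured.
 */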
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to re-enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_configuration_valid(sw);

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port, port->xdomain);
		}
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/* Remove any PCIe devices the firmware might have set up */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk, the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us, find and tear them
	 * down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to come up; empirically
		 * 100 ms is enough.
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

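/*
 * Recursively remove XDomain connections that disappeared while the
 * domain was suspended. Returns the number of removed XDomains so the
 * caller can decide whether a rescan is needed.
 */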
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

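/*
 * For hibernation we only stop processing hotplug events while the
 * image is being created; nothing is powered down here.
 */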
static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

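/* Re-enable hotplug event handling once the hibernation image has been created */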
static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains. If another domain was swapped
	 * in place of an unplugged XDomain, run another rescan to pick
	 * it up.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

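/*
 * Stop hotplug handling and suspend the topology below the host router
 * for runtime PM. Tunnels stay on the tunnel list and are restarted in
 * tb_runtime_resume().
 */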
static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

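/*
 * Delayed work scheduled from tb_runtime_resume() to clean up routers
 * and XDomains that were unplugged while the domain was runtime
 * suspended.
 */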
static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}

static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static bool tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;
	bool ret;

	if (!x86_apple_machine)
		return false;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return false;
	}

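	/*
	 * Walk up the PCIe hierarchy until we find the upstream port of
	 * the Thunderbolt controller; its subordinate bus holds the
	 * tunneled downstream ports we want to link to the NHI.
	 */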
	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return false;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return false;

	/*
	 * For each hotplug capable downstream port, add a device link
	 * back to the NHI so that PCIe tunnels can be re-established
	 * after sleep.
	 */
	ret = false;
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
			ret = true;
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}

	return ret;
}

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

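	/*
	 * Follow the firmware tunneling policy: if ACPI does not allow
	 * native PCIe tunneling, disable PCIe tunnels entirely,
	 * otherwise require userspace authorization for PCIe.
	 */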
	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
	tb_init_bandwidth_groups(tcm);

	tb_dbg(tb, "using software connection manager\n");

	/*
	 * Device links are needed to make sure we establish tunnels
	 * before the PCIe/USB stack is resumed, so complain here if we
	 * find them missing.
	 */
	if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
		tb_warn(tb, "device links to tunneled native ports are missing!\n");

	return tb;
}